complete overhaul with pre-commit hooks

Mustafa Soylu
2025-06-07 18:47:18 +02:00
parent 3971dba9bf
commit 62cbeeb513
71 changed files with 4200 additions and 2820 deletions

View File

@@ -1,11 +1,12 @@
import os
import json
import traceback
from deezspot.spotloader import SpoLogin
from deezspot.deezloader import DeeLogin
from pathlib import Path
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds, get_spotify_blob_path
from routes.utils.celery_config import get_config_params
from routes.utils.credentials import (
get_credential,
_get_global_spotify_api_creds,
get_spotify_blob_path,
)
def download_album(
url,
@@ -23,56 +24,68 @@ def download_album(
max_retries=3,
progress_callback=None,
convert_to=None,
bitrate=None
bitrate=None,
):
try:
# Detect URL source (Spotify or Deezer) from URL
is_spotify_url = 'open.spotify.com' in url.lower()
is_deezer_url = 'deezer.com' in url.lower()
service = ''
is_spotify_url = "open.spotify.com" in url.lower()
is_deezer_url = "deezer.com" in url.lower()
service = ""
if is_spotify_url:
service = 'spotify'
service = "spotify"
elif is_deezer_url:
service = 'deezer'
service = "deezer"
else:
error_msg = "Invalid URL: Must be from open.spotify.com or deezer.com"
print(f"ERROR: {error_msg}")
raise ValueError(error_msg)
print(f"DEBUG: album.py - Service determined from URL: {service}")
print(f"DEBUG: album.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'")
print(
f"DEBUG: album.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'"
)
# Get global Spotify API credentials
global_spotify_client_id, global_spotify_client_secret = _get_global_spotify_api_creds()
global_spotify_client_id, global_spotify_client_secret = (
_get_global_spotify_api_creds()
)
if not global_spotify_client_id or not global_spotify_client_secret:
warning_msg = "WARN: album.py - Global Spotify client_id/secret not found in search.json. Spotify operations will likely fail."
print(warning_msg)
if service == 'spotify':
if fallback: # Fallback is a Deezer account name for a Spotify URL
if quality is None: quality = 'FLAC' # Deezer quality for first attempt
if fall_quality is None: fall_quality = 'HIGH' # Spotify quality for fallback (if Deezer fails)
if service == "spotify":
if fallback: # Fallback is a Deezer account name for a Spotify URL
if quality is None:
quality = "FLAC" # Deezer quality for first attempt
if fall_quality is None:
fall_quality = (
"HIGH" # Spotify quality for fallback (if Deezer fails)
)
deezer_error = None
try:
# Attempt 1: Deezer via download_albumspo (using 'fallback' as Deezer account name)
print(f"DEBUG: album.py - Spotify URL. Attempt 1: Deezer (account: {fallback})")
deezer_fallback_creds = get_credential('deezer', fallback)
arl = deezer_fallback_creds.get('arl')
print(
f"DEBUG: album.py - Spotify URL. Attempt 1: Deezer (account: {fallback})"
)
deezer_fallback_creds = get_credential("deezer", fallback)
arl = deezer_fallback_creds.get("arl")
if not arl:
raise ValueError(f"ARL not found for Deezer account '{fallback}'.")
raise ValueError(
f"ARL not found for Deezer account '{fallback}'."
)
dl = DeeLogin(
arl=arl,
spotify_client_id=global_spotify_client_id,
spotify_client_secret=global_spotify_client_secret,
progress_callback=progress_callback
progress_callback=progress_callback,
)
dl.download_albumspo(
link_album=url, # Spotify URL
link_album=url, # Spotify URL
output_dir="./downloads",
quality_download=quality, # Deezer quality
quality_download=quality, # Deezer quality
recursive_quality=True,
recursive_download=False,
not_interface=False,
@@ -85,34 +98,49 @@ def download_album(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate
bitrate=bitrate,
)
print(
f"DEBUG: album.py - Album download via Deezer (account: {fallback}) successful for Spotify URL."
)
print(f"DEBUG: album.py - Album download via Deezer (account: {fallback}) successful for Spotify URL.")
except Exception as e:
deezer_error = e
print(f"ERROR: album.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}")
print(
f"ERROR: album.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}"
)
traceback.print_exc()
print(f"DEBUG: album.py - Attempting Spotify direct download (account: {main} for blob)...")
print(
f"DEBUG: album.py - Attempting Spotify direct download (account: {main} for blob)..."
)
# Attempt 2: Spotify direct via download_album (using 'main' as Spotify account for blob)
try:
if not global_spotify_client_id or not global_spotify_client_secret:
raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.")
if (
not global_spotify_client_id
or not global_spotify_client_secret
):
raise ValueError(
"Global Spotify API credentials (client_id/secret) not configured for Spotify download."
)
blob_file_path = get_spotify_blob_path(main)
if not blob_file_path or not blob_file_path.exists():
raise FileNotFoundError(f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}")
raise FileNotFoundError(
f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}"
)
spo = SpoLogin(
credentials_path=str(blob_file_path), # Ensure it's a string
credentials_path=str(
blob_file_path
), # Ensure it's a string
spotify_client_id=global_spotify_client_id,
spotify_client_secret=global_spotify_client_secret,
progress_callback=progress_callback
progress_callback=progress_callback,
)
spo.download_album(
link_album=url, # Spotify URL
link_album=url, # Spotify URL
output_dir="./downloads",
quality_download=fall_quality, # Spotify quality
quality_download=fall_quality, # Spotify quality
recursive_quality=True,
recursive_download=False,
not_interface=False,
@@ -126,36 +154,47 @@ def download_album(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate
bitrate=bitrate,
)
print(
f"DEBUG: album.py - Spotify direct download (account: {main} for blob) successful."
)
print(f"DEBUG: album.py - Spotify direct download (account: {main} for blob) successful.")
except Exception as e2:
print(f"ERROR: album.py - Spotify direct download (account: {main} for blob) also failed: {e2}")
print(
f"ERROR: album.py - Spotify direct download (account: {main} for blob) also failed: {e2}"
)
raise RuntimeError(
f"Both Deezer attempt (account: {fallback}) and Spotify direct (account: {main} for blob) failed. "
f"Deezer error: {deezer_error}, Spotify error: {e2}"
) from e2
else:
# Spotify URL, no fallback. Direct Spotify download using 'main' (Spotify account for blob)
if quality is None: quality = 'HIGH' # Default Spotify quality
print(f"DEBUG: album.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}")
if quality is None:
quality = "HIGH" # Default Spotify quality
print(
f"DEBUG: album.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}"
)
if not global_spotify_client_id or not global_spotify_client_secret:
raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.")
raise ValueError(
"Global Spotify API credentials (client_id/secret) not configured for Spotify download."
)
blob_file_path = get_spotify_blob_path(main)
if not blob_file_path or not blob_file_path.exists():
raise FileNotFoundError(f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}")
raise FileNotFoundError(
f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}"
)
spo = SpoLogin(
credentials_path=str(blob_file_path),  # Ensure it's a string
spotify_client_id=global_spotify_client_id,
spotify_client_secret=global_spotify_client_secret,
progress_callback=progress_callback
progress_callback=progress_callback,
)
spo.download_album(
link_album=url,
output_dir="./downloads",
quality_download=quality,
recursive_quality=True,
recursive_download=False,
not_interface=False,
@@ -169,26 +208,31 @@ def download_album(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate
bitrate=bitrate,
)
print(f"DEBUG: album.py - Direct Spotify download (account: {main} for blob) successful.")
elif service == 'deezer':
print(
f"DEBUG: album.py - Direct Spotify download (account: {main} for blob) successful."
)
elif service == "deezer":
# Deezer URL. Direct Deezer download using 'main' (Deezer account name for ARL)
if quality is None: quality = 'FLAC' # Default Deezer quality
print(f"DEBUG: album.py - Deezer URL. Direct download with Deezer account: {main}")
deezer_main_creds = get_credential('deezer', main) # For ARL
arl = deezer_main_creds.get('arl')
if quality is None:
quality = "FLAC" # Default Deezer quality
print(
f"DEBUG: album.py - Deezer URL. Direct download with Deezer account: {main}"
)
deezer_main_creds = get_credential("deezer", main) # For ARL
arl = deezer_main_creds.get("arl")
if not arl:
raise ValueError(f"ARL not found for Deezer account '{main}'.")
dl = DeeLogin(
arl=arl, # Account specific ARL
spotify_client_id=global_spotify_client_id, # Global Spotify keys
spotify_client_secret=global_spotify_client_secret, # Global Spotify keys
progress_callback=progress_callback
arl=arl, # Account specific ARL
spotify_client_id=global_spotify_client_id, # Global Spotify keys
spotify_client_secret=global_spotify_client_secret, # Global Spotify keys
progress_callback=progress_callback,
)
dl.download_albumdee( # Deezer URL, download via Deezer
dl.download_albumdee( # Deezer URL, download via Deezer
link_album=url,
output_dir="./downloads",
quality_download=quality,
@@ -203,9 +247,11 @@ def download_album(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate
bitrate=bitrate,
)
print(
f"DEBUG: album.py - Direct Deezer download (account: {main}) successful."
)
print(f"DEBUG: album.py - Direct Deezer download (account: {main}) successful.")
else:
# Should be caught by initial service check, but as a safeguard
raise ValueError(f"Unsupported service determined: {service}")

View File

@@ -1,10 +1,7 @@
import json
import traceback
from pathlib import Path
import os
import logging
from flask import Blueprint, Response, request, url_for
from routes.utils.celery_queue_manager import download_queue_manager, get_config_params
from flask import url_for
from routes.utils.celery_queue_manager import download_queue_manager
from routes.utils.get_info import get_spotify_info
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds
from routes.utils.celery_tasks import get_last_task_status, ProgressState
@@ -15,12 +12,18 @@ from deezspot.libutils.utils import get_ids, link_is_valid
# Configure logging
logger = logging.getLogger(__name__)
def log_json(message_dict):
"""Helper function to output a JSON-formatted log message."""
print(json.dumps(message_dict))
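# Usage sketch for the helper above:
# log_json({"status": "error", "message": "No artist URL provided."})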
def get_artist_discography(url, main_spotify_account_name, album_type='album,single,compilation,appears_on', progress_callback=None):
def get_artist_discography(
url,
main_spotify_account_name,
album_type="album,single,compilation,appears_on",
progress_callback=None,
):
"""
Validate the URL, extract the artist ID, and retrieve the discography.
Uses global Spotify API client_id/secret for Spo initialization.
@@ -34,28 +37,41 @@ def get_artist_discography(url, main_spotify_account_name, album_type='album,sin
log_json({"status": "error", "message": "No artist URL provided."})
raise ValueError("No artist URL provided.")
link_is_valid(link=url)  # This will raise an exception if the link is invalid.
client_id, client_secret = _get_global_spotify_api_creds()
if not client_id or not client_secret:
log_json({"status": "error", "message": "Global Spotify API client_id or client_secret not configured."})
log_json(
{
"status": "error",
"message": "Global Spotify API client_id or client_secret not configured.",
}
)
raise ValueError("Global Spotify API credentials are not configured.")
if not main_spotify_account_name:
# This is a warning now, as API keys are global.
logger.warning("main_spotify_account_name not provided for get_artist_discography context. Using global API keys.")
logger.warning(
"main_spotify_account_name not provided for get_artist_discography context. Using global API keys."
)
else:
# Check if account exists for context, good for consistency
try:
get_credential('spotify', main_spotify_account_name)
logger.debug(f"Spotify account context '{main_spotify_account_name}' exists for get_artist_discography.")
get_credential("spotify", main_spotify_account_name)
logger.debug(
f"Spotify account context '{main_spotify_account_name}' exists for get_artist_discography."
)
except FileNotFoundError:
logger.warning(f"Spotify account '{main_spotify_account_name}' provided for discography context not found.")
logger.warning(
f"Spotify account '{main_spotify_account_name}' provided for discography context not found."
)
except Exception as e:
logger.warning(f"Error checking Spotify account '{main_spotify_account_name}' for discography context: {e}")
logger.warning(
f"Error checking Spotify account '{main_spotify_account_name}' for discography context: {e}"
)
Spo.__init__(client_id, client_secret)  # Initialize with global API keys
try:
artist_id = get_ids(url)
@@ -78,94 +94,108 @@ def get_artist_discography(url, main_spotify_account_name, album_type='album,sin
raise
def download_artist_albums(url, album_type="album,single,compilation", request_args=None):
def download_artist_albums(
url, album_type="album,single,compilation", request_args=None
):
"""
Download albums by an artist, filtered by album types.
Args:
url (str): Spotify artist URL
album_type (str): Comma-separated list of album types to download
(album, single, compilation, appears_on)
request_args (dict): Original request arguments for tracking
Returns:
tuple: (list of successfully queued albums, list of duplicate albums)
"""
if not url:
raise ValueError("Missing required parameter: url")
# Extract artist ID from URL
artist_id = url.split('/')[-1]
if '?' in artist_id:
artist_id = artist_id.split('?')[0]
artist_id = url.split("/")[-1]
if "?" in artist_id:
artist_id = artist_id.split("?")[0]
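# For example, "https://open.spotify.com/artist/4Z8W4fKeB5YxbusRsdQVPb?si=abc"
# yields artist_id "4Z8W4fKeB5YxbusRsdQVPb" after the split and "?" strip.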
logger.info(f"Fetching artist info for ID: {artist_id}")
# Detect URL source (only Spotify is supported for artists)
is_spotify_url = 'open.spotify.com' in url.lower()
is_deezer_url = 'deezer.com' in url.lower()
is_spotify_url = "open.spotify.com" in url.lower()
# Artist functionality only works with Spotify URLs currently
if not is_spotify_url:
error_msg = "Invalid URL: Artist functionality only supports open.spotify.com URLs"
error_msg = (
"Invalid URL: Artist functionality only supports open.spotify.com URLs"
)
logger.error(error_msg)
raise ValueError(error_msg)
# Get artist info with albums
artist_data = get_spotify_info(artist_id, "artist_discography")
# Debug logging to inspect the structure of artist_data
logger.debug(f"Artist data structure has keys: {list(artist_data.keys() if isinstance(artist_data, dict) else [])}")
if not artist_data or 'items' not in artist_data:
raise ValueError(f"Failed to retrieve artist data or no albums found for artist ID {artist_id}")
logger.debug(
f"Artist data structure has keys: {list(artist_data.keys() if isinstance(artist_data, dict) else [])}"
)
if not artist_data or "items" not in artist_data:
raise ValueError(
f"Failed to retrieve artist data or no albums found for artist ID {artist_id}"
)
# Parse the album types to filter by
allowed_types = [t.strip().lower() for t in album_type.split(",")]
logger.info(f"Filtering albums by types: {allowed_types}")
# Get artist name from the first album
artist_name = ""
if artist_data.get('items') and len(artist_data['items']) > 0:
first_album = artist_data['items'][0]
if first_album.get('artists') and len(first_album['artists']) > 0:
artist_name = first_album['artists'][0].get('name', '')
# Filter albums by the specified types
filtered_albums = []
for album in artist_data.get('items', []):
album_type_value = album.get('album_type', '').lower()
album_group_value = album.get('album_group', '').lower()
for album in artist_data.get("items", []):
album_type_value = album.get("album_type", "").lower()
album_group_value = album.get("album_group", "").lower()
# Apply filtering logic based on album_type and album_group
if (('album' in allowed_types and album_type_value == 'album' and album_group_value == 'album') or
('single' in allowed_types and album_type_value == 'single' and album_group_value == 'single') or
('compilation' in allowed_types and album_type_value == 'compilation') or
('appears_on' in allowed_types and album_group_value == 'appears_on')):
if (
(
"album" in allowed_types
and album_type_value == "album"
and album_group_value == "album"
)
or (
"single" in allowed_types
and album_type_value == "single"
and album_group_value == "single"
)
or ("compilation" in allowed_types and album_type_value == "compilation")
or ("appears_on" in allowed_types and album_group_value == "appears_on")
):
filtered_albums.append(album)
if not filtered_albums:
logger.warning(f"No albums match the specified types: {album_type}")
return [], []
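# The compound condition above, restated as a sketch predicate for
# readability; album_matches is illustrative, not part of the module.
def album_matches(album: dict, allowed_types: list) -> bool:
    a_type = album.get("album_type", "").lower()
    a_group = album.get("album_group", "").lower()
    return (
        ("album" in allowed_types and a_type == "album" and a_group == "album")
        or ("single" in allowed_types and a_type == "single" and a_group == "single")
        or ("compilation" in allowed_types and a_type == "compilation")
        or ("appears_on" in allowed_types and a_group == "appears_on")
    )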
# Queue each album as a separate download task
album_task_ids = []
successfully_queued_albums = []
duplicate_albums = []  # To store info about albums that were duplicates
for album in filtered_albums:
# Add detailed logging to inspect each album's structure and URLs
logger.debug(f"Processing album: {album.get('name', 'Unknown')}")
logger.debug(f"Album structure has keys: {list(album.keys())}")
external_urls = album.get('external_urls', {})
external_urls = album.get("external_urls", {})
logger.debug(f"Album external_urls: {external_urls}")
album_url = external_urls.get('spotify', '')
album_name = album.get('name', 'Unknown Album')
album_artists = album.get('artists', [])
album_artist = album_artists[0].get('name', 'Unknown Artist') if album_artists else 'Unknown Artist'
album_id = album.get('id')
album_url = external_urls.get("spotify", "")
album_name = album.get("name", "Unknown Album")
album_artists = album.get("artists", [])
album_artist = (
album_artists[0].get("name", "Unknown Artist")
if album_artists
else "Unknown Artist"
)
album_id = album.get("id")
logger.debug(f"Extracted album URL: {album_url}")
logger.debug(f"Extracted album ID: {album_id}")
@@ -173,7 +203,7 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
if not album_url or not album_id:
logger.warning(f"Skipping album without URL or ID: {album_name}")
continue
# Create album-specific request args instead of using original artist request
album_request_args = {
"url": album_url,
@@ -182,12 +212,14 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
"type": "album",
# URL source will be automatically detected in the download functions
"parent_artist_url": url,
"parent_request_type": "artist"
"parent_request_type": "artist",
}
# Include original download URL for this album task
album_request_args["original_url"] = url_for('album.handle_download', album_id=album_id, _external=True)
album_request_args["original_url"] = url_for(
"album.handle_download", album_id=album_id, _external=True
)
# Create task for this album
task_data = {
"download_type": "album",
@@ -196,44 +228,64 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
"retry_url": album_url, # Use album URL for retry logic, not artist URL
"name": album_name,
"artist": album_artist,
"orig_request": album_request_args # Store album-specific request params
"orig_request": album_request_args, # Store album-specific request params
}
# Debug log the task data being sent to the queue
logger.debug(f"Album task data: url={task_data['url']}, retry_url={task_data['retry_url']}")
logger.debug(
f"Album task data: url={task_data['url']}, retry_url={task_data['retry_url']}"
)
try:
task_id = download_queue_manager.add_task(task_data)
# Check the status of the newly added task to see if it was marked as a duplicate error
last_status = get_last_task_status(task_id)
if last_status and last_status.get("status") == ProgressState.ERROR and last_status.get("existing_task_id"):
logger.warning(f"Album {album_name} (URL: {album_url}) is a duplicate. Error task ID: {task_id}. Existing task ID: {last_status.get('existing_task_id')}")
duplicate_albums.append({
"name": album_name,
"artist": album_artist,
"url": album_url,
"error_task_id": task_id, # This is the ID of the task marked as a duplicate error
"existing_task_id": last_status.get("existing_task_id"),
"message": last_status.get("message", "Duplicate download attempt.")
})
if (
last_status
and last_status.get("status") == ProgressState.ERROR
and last_status.get("existing_task_id")
):
logger.warning(
f"Album {album_name} (URL: {album_url}) is a duplicate. Error task ID: {task_id}. Existing task ID: {last_status.get('existing_task_id')}"
)
duplicate_albums.append(
{
"name": album_name,
"artist": album_artist,
"url": album_url,
"error_task_id": task_id, # This is the ID of the task marked as a duplicate error
"existing_task_id": last_status.get("existing_task_id"),
"message": last_status.get(
"message", "Duplicate download attempt."
),
}
)
else:
# If not a duplicate error, it was successfully queued (or failed for other reasons handled by add_task)
# We only add to successfully_queued_albums if it wasn't a duplicate error from add_task
# Other errors from add_task (like submission failure) would also result in an error status for task_id
# but won't have 'existing_task_id'. The client can check the status of this task_id.
album_task_ids.append(task_id) # Keep track of all task_ids returned by add_task
successfully_queued_albums.append({
"name": album_name,
"artist": album_artist,
"url": album_url,
"task_id": task_id
})
album_task_ids.append(
task_id
) # Keep track of all task_ids returned by add_task
successfully_queued_albums.append(
{
"name": album_name,
"artist": album_artist,
"url": album_url,
"task_id": task_id,
}
)
logger.info(f"Queued album download: {album_name} ({task_id})")
except Exception as e: # Catch any other unexpected error from add_task itself (though it should be rare now)
logger.error(f"Failed to queue album {album_name} due to an unexpected error in add_task: {str(e)}")
except Exception as e: # Catch any other unexpected error from add_task itself (though it should be rare now)
logger.error(
f"Failed to queue album {album_name} due to an unexpected error in add_task: {str(e)}"
)
# Optionally, collect these errors. For now, just logging and continuing.
logger.info(f"Artist album processing: {len(successfully_queued_albums)} queued, {len(duplicate_albums)} duplicates found.")
logger.info(
f"Artist album processing: {len(successfully_queued_albums)} queued, {len(duplicate_albums)} duplicates found."
)
return successfully_queued_albums, duplicate_albums

View File

@@ -7,49 +7,52 @@ from pathlib import Path
logger = logging.getLogger(__name__)
# Redis configuration - read from environment variables
REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
REDIS_PORT = os.getenv('REDIS_PORT', '6379')
REDIS_DB = os.getenv('REDIS_DB', '0')
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = os.getenv("REDIS_PORT", "6379")
REDIS_DB = os.getenv("REDIS_DB", "0")
# Optional Redis password
REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', '')
REDIS_PASSWORD = os.getenv("REDIS_PASSWORD", "")
# Build default URL with password if provided
_password_part = f":{REDIS_PASSWORD}@" if REDIS_PASSWORD else ""
default_redis_url = f"redis://{_password_part}{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}"
REDIS_URL = os.getenv('REDIS_URL', default_redis_url)
REDIS_BACKEND = os.getenv('REDIS_BACKEND', REDIS_URL)
REDIS_URL = os.getenv("REDIS_URL", default_redis_url)
REDIS_BACKEND = os.getenv("REDIS_BACKEND", REDIS_URL)
# Log Redis connection details
logger.info(f"Redis configuration: REDIS_URL={REDIS_URL}, REDIS_BACKEND={REDIS_BACKEND}")
logger.info(
f"Redis configuration: REDIS_URL={REDIS_URL}, REDIS_BACKEND={REDIS_BACKEND}"
)
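# A sketch of the URL shapes produced above; build_redis_url is an
# illustrative helper, not part of the module.
def build_redis_url(host="localhost", port="6379", db="0", password=""):
    auth = f":{password}@" if password else ""
    return f"redis://{auth}{host}:{port}/{db}"

assert build_redis_url() == "redis://localhost:6379/0"
assert build_redis_url(password="s3cret") == "redis://:s3cret@localhost:6379/0"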
# Config path
CONFIG_FILE_PATH = Path('./data/config/main.json')
CONFIG_FILE_PATH = Path("./data/config/main.json")
DEFAULT_MAIN_CONFIG = {
'service': 'spotify',
'spotify': '',
'deezer': '',
'fallback': False,
'spotifyQuality': 'NORMAL',
'deezerQuality': 'MP3_128',
'realTime': False,
'customDirFormat': '%ar_album%/%album%',
'customTrackFormat': '%tracknum%. %music%',
'tracknum_padding': True,
'save_cover': True,
'maxConcurrentDownloads': 3,
'maxRetries': 3,
'retryDelaySeconds': 5,
'retry_delay_increase': 5,
'convertTo': None,
'bitrate': None
"service": "spotify",
"spotify": "",
"deezer": "",
"fallback": False,
"spotifyQuality": "NORMAL",
"deezerQuality": "MP3_128",
"realTime": False,
"customDirFormat": "%ar_album%/%album%",
"customTrackFormat": "%tracknum%. %music%",
"tracknum_padding": True,
"save_cover": True,
"maxConcurrentDownloads": 3,
"maxRetries": 3,
"retryDelaySeconds": 5,
"retry_delay_increase": 5,
"convertTo": None,
"bitrate": None,
}
def get_config_params():
"""
Get configuration parameters from the config file.
Creates the file with defaults if it doesn't exist.
Ensures all default keys are present in the loaded config.
Returns:
dict: A dictionary containing configuration parameters
"""
@@ -59,63 +62,69 @@ def get_config_params():
if not CONFIG_FILE_PATH.exists():
logger.info(f"{CONFIG_FILE_PATH} not found. Creating with default values.")
with open(CONFIG_FILE_PATH, 'w') as f:
with open(CONFIG_FILE_PATH, "w") as f:
json.dump(DEFAULT_MAIN_CONFIG, f, indent=4)
return DEFAULT_MAIN_CONFIG.copy() # Return a copy of defaults
with open(CONFIG_FILE_PATH, 'r') as f:
return DEFAULT_MAIN_CONFIG.copy() # Return a copy of defaults
with open(CONFIG_FILE_PATH, "r") as f:
config = json.load(f)
# Ensure all default keys are present in the loaded config
updated = False
for key, value in DEFAULT_MAIN_CONFIG.items():
if key not in config:
config[key] = value
updated = True
if updated:
logger.info(f"Configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults.")
with open(CONFIG_FILE_PATH, 'w') as f:
logger.info(
f"Configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults."
)
with open(CONFIG_FILE_PATH, "w") as f:
json.dump(config, f, indent=4)
return config
except Exception as e:
logger.error(f"Error reading or creating config at {CONFIG_FILE_PATH}: {e}", exc_info=True)
logger.error(
f"Error reading or creating config at {CONFIG_FILE_PATH}: {e}",
exc_info=True,
)
# Return defaults if config read/create fails
return DEFAULT_MAIN_CONFIG.copy()
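# Note: the missing-key loop above is equivalent to merging defaults first,
# e.g. config = {**DEFAULT_MAIN_CONFIG, **loaded}, with the added benefit
# that the file is only rewritten when a key was actually missing.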
# Load configuration values we need for Celery
config_params_values = get_config_params() # Renamed to avoid conflict with module name
MAX_CONCURRENT_DL = config_params_values.get('maxConcurrentDownloads', 3)
MAX_RETRIES = config_params_values.get('maxRetries', 3)
RETRY_DELAY = config_params_values.get('retryDelaySeconds', 5)
RETRY_DELAY_INCREASE = config_params_values.get('retry_delay_increase', 5)
config_params_values = get_config_params() # Renamed to avoid conflict with module name
MAX_CONCURRENT_DL = config_params_values.get("maxConcurrentDownloads", 3)
MAX_RETRIES = config_params_values.get("maxRetries", 3)
RETRY_DELAY = config_params_values.get("retryDelaySeconds", 5)
RETRY_DELAY_INCREASE = config_params_values.get("retry_delay_increase", 5)
# Define task queues
task_queues = {
'default': {
'exchange': 'default',
'routing_key': 'default',
"default": {
"exchange": "default",
"routing_key": "default",
},
'downloads': {
'exchange': 'downloads',
'routing_key': 'downloads',
"downloads": {
"exchange": "downloads",
"routing_key": "downloads",
},
"utility_tasks": {
"exchange": "utility_tasks",
"routing_key": "utility_tasks",
},
'utility_tasks': {
'exchange': 'utility_tasks',
'routing_key': 'utility_tasks',
}
}
# Set default queue
task_default_queue = 'downloads'
task_default_exchange = 'downloads'
task_default_routing_key = 'downloads'
task_default_queue = "downloads"
task_default_exchange = "downloads"
task_default_routing_key = "downloads"
# Celery task settings
task_serializer = 'json'
accept_content = ['json']
result_serializer = 'json'
task_serializer = "json"
accept_content = ["json"]
result_serializer = "json"
enable_utc = True
# Configure worker concurrency based on MAX_CONCURRENT_DL
@@ -123,15 +132,15 @@ worker_concurrency = MAX_CONCURRENT_DL
# Configure task rate limiting - these are per-minute limits
task_annotations = {
'routes.utils.celery_tasks.download_track': {
'rate_limit': f'{MAX_CONCURRENT_DL}/m',
"routes.utils.celery_tasks.download_track": {
"rate_limit": f"{MAX_CONCURRENT_DL}/m",
},
'routes.utils.celery_tasks.download_album': {
'rate_limit': f'{MAX_CONCURRENT_DL}/m',
"routes.utils.celery_tasks.download_album": {
"rate_limit": f"{MAX_CONCURRENT_DL}/m",
},
"routes.utils.celery_tasks.download_playlist": {
"rate_limit": f"{MAX_CONCURRENT_DL}/m",
},
'routes.utils.celery_tasks.download_playlist': {
'rate_limit': f'{MAX_CONCURRENT_DL}/m',
}
}
# Configure retry settings
@@ -144,10 +153,10 @@ result_expires = 60 * 60 * 24 * 7 # 7 days
# Configure visibility timeout for task messages
broker_transport_options = {
'visibility_timeout': 3600, # 1 hour
'fanout_prefix': True,
'fanout_patterns': True,
'priority_steps': [0, 3, 6, 9],
"visibility_timeout": 3600, # 1 hour
"fanout_prefix": True,
"fanout_patterns": True,
"priority_steps": [0, 3, 6, 9],
}
# Important broker connection settings
@@ -157,4 +166,4 @@ broker_connection_max_retries = 10
broker_pool_limit = 10
worker_prefetch_multiplier = 1 # Process one task at a time per worker
worker_max_tasks_per_child = 100 # Restart worker after 100 tasks
worker_disable_rate_limits = False
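# How this config module is consumed (mirrors the wiring in celery_tasks.py
# later in this commit):
from celery import Celery

celery_app = Celery(
    "routes.utils.celery_tasks", broker=REDIS_URL, backend=REDIS_BACKEND
)
celery_app.config_from_object("routes.utils.celery_config")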

View File

@@ -1,26 +1,9 @@
import os
import json
import signal
import subprocess
import logging
import time
import atexit
from pathlib import Path
import threading
import queue
import sys
import uuid
# Import Celery task utilities
from .celery_tasks import (
ProgressState,
get_task_info,
get_last_task_status,
store_task_status,
get_all_tasks as get_all_celery_tasks_info,
cleanup_stale_errors,
delayed_delete_task_data,
)
from .celery_config import get_config_params, MAX_CONCURRENT_DL
# Configure logging

View File

@@ -1,33 +1,29 @@
import os
import json
import time
import uuid
import logging
from datetime import datetime
from routes.utils.celery_tasks import (
celery_app,
download_track,
download_album,
download_playlist,
store_task_status,
store_task_info,
get_task_info,
get_task_status,
get_last_task_status,
cancel_task as cancel_celery_task,
retry_task as retry_celery_task,
get_all_tasks,
ProgressState
ProgressState,
)
# Configure logging
logger = logging.getLogger(__name__)
# Load configuration
CONFIG_PATH = './data/config/main.json'
CONFIG_PATH = "./data/config/main.json"
try:
with open(CONFIG_PATH, 'r') as f:
with open(CONFIG_PATH, "r") as f:
config_data = json.load(f)
MAX_CONCURRENT_DL = config_data.get("maxConcurrentDownloads", 3)
except Exception as e:
@@ -35,82 +31,86 @@ except Exception as e:
# Fallback default
MAX_CONCURRENT_DL = 3
def get_config_params():
"""
Get common download parameters from the config file.
This centralizes parameter retrieval and reduces redundancy in API calls.
Returns:
dict: A dictionary containing common parameters from config
"""
try:
with open(CONFIG_PATH, 'r') as f:
with open(CONFIG_PATH, "r") as f:
config = json.load(f)
return {
'spotify': config.get('spotify', ''),
'deezer': config.get('deezer', ''),
'fallback': config.get('fallback', False),
'spotifyQuality': config.get('spotifyQuality', 'NORMAL'),
'deezerQuality': config.get('deezerQuality', 'MP3_128'),
'realTime': config.get('realTime', False),
'customDirFormat': config.get('customDirFormat', '%ar_album%/%album%'),
'customTrackFormat': config.get('customTrackFormat', '%tracknum%. %music%'),
'tracknum_padding': config.get('tracknum_padding', True),
'save_cover': config.get('save_cover', True),
'maxRetries': config.get('maxRetries', 3),
'retryDelaySeconds': config.get('retryDelaySeconds', 5),
'retry_delay_increase': config.get('retry_delay_increase', 5),
'convertTo': config.get('convertTo', None),
'bitrate': config.get('bitrate', None)
"spotify": config.get("spotify", ""),
"deezer": config.get("deezer", ""),
"fallback": config.get("fallback", False),
"spotifyQuality": config.get("spotifyQuality", "NORMAL"),
"deezerQuality": config.get("deezerQuality", "MP3_128"),
"realTime": config.get("realTime", False),
"customDirFormat": config.get("customDirFormat", "%ar_album%/%album%"),
"customTrackFormat": config.get("customTrackFormat", "%tracknum%. %music%"),
"tracknum_padding": config.get("tracknum_padding", True),
"save_cover": config.get("save_cover", True),
"maxRetries": config.get("maxRetries", 3),
"retryDelaySeconds": config.get("retryDelaySeconds", 5),
"retry_delay_increase": config.get("retry_delay_increase", 5),
"convertTo": config.get("convertTo", None),
"bitrate": config.get("bitrate", None),
}
except Exception as e:
logger.error(f"Error reading config for parameters: {e}")
# Return defaults if config read fails
return {
'spotify': '',
'deezer': '',
'fallback': False,
'spotifyQuality': 'NORMAL',
'deezerQuality': 'MP3_128',
'realTime': False,
'customDirFormat': '%ar_album%/%album%',
'customTrackFormat': '%tracknum%. %music%',
'tracknum_padding': True,
'save_cover': True,
'maxRetries': 3,
'retryDelaySeconds': 5,
'retry_delay_increase': 5,
'convertTo': None, # Default for conversion
'bitrate': None # Default for bitrate
"spotify": "",
"deezer": "",
"fallback": False,
"spotifyQuality": "NORMAL",
"deezerQuality": "MP3_128",
"realTime": False,
"customDirFormat": "%ar_album%/%album%",
"customTrackFormat": "%tracknum%. %music%",
"tracknum_padding": True,
"save_cover": True,
"maxRetries": 3,
"retryDelaySeconds": 5,
"retry_delay_increase": 5,
"convertTo": None, # Default for conversion
"bitrate": None, # Default for bitrate
}
class CeleryDownloadQueueManager:
"""
Manages a queue of download tasks using Celery.
This is a drop-in replacement for the previous DownloadQueueManager.
Instead of using file-based progress tracking, it uses Redis via Celery
for task management and progress tracking.
"""
def __init__(self):
"""Initialize the Celery-based download queue manager"""
self.max_concurrent = MAX_CONCURRENT_DL
self.paused = False
print(f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}")
print(
f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}"
)
def add_task(self, task: dict, from_watch_job: bool = False):
"""
Add a new download task to the Celery queue.
- If from_watch_job is True and an active duplicate is found, the task is not queued and None is returned.
- If from_watch_job is False and an active duplicate is found, a new task ID is created,
set to an ERROR state indicating the duplicate, and this new error task's ID is returned.
Args:
task (dict): Task parameters including download_type, url, etc.
from_watch_job (bool): If True, duplicate active tasks are skipped. Defaults to False.
Returns:
str | None: Task ID if successfully queued or an error task ID for non-watch duplicates.
None if from_watch_job is True and an active duplicate was found.
@@ -121,16 +121,18 @@ class CeleryDownloadQueueManager:
incoming_type = task.get("download_type", "unknown")
if not incoming_url:
logger.warning("Task being added with no URL. Duplicate check might be unreliable.")
logger.warning(
"Task being added with no URL. Duplicate check might be unreliable."
)
NON_BLOCKING_STATES = [
ProgressState.COMPLETE,
ProgressState.CANCELLED,
ProgressState.ERROR,
ProgressState.ERROR_RETRIED,
ProgressState.ERROR_AUTO_CLEANED
ProgressState.ERROR_AUTO_CLEANED,
]
all_existing_tasks_summary = get_all_tasks()
if incoming_url:
for task_summary in all_existing_tasks_summary:
@@ -143,21 +145,24 @@ class CeleryDownloadQueueManager:
if not existing_task_info or not existing_last_status_obj:
continue
existing_url = existing_task_info.get("url")
existing_type = existing_task_info.get("download_type")
existing_status = existing_last_status_obj.get("status")
if (existing_url == incoming_url and
existing_type == incoming_type and
existing_status not in NON_BLOCKING_STATES):
if (
existing_url == incoming_url
and existing_type == incoming_type
and existing_status not in NON_BLOCKING_STATES
):
message = f"Duplicate download: URL '{incoming_url}' (type: {incoming_type}) is already being processed by task {existing_task_id} (status: {existing_status})."
logger.warning(message)
if from_watch_job:
logger.info(f"Task from watch job for {incoming_url} not queued due to active duplicate {existing_task_id}.")
return None # Skip execution for watch jobs
logger.info(
f"Task from watch job for {incoming_url} not queued due to active duplicate {existing_task_id}."
)
return None # Skip execution for watch jobs
else:
# Create a new task_id for this duplicate request and mark it as an error
error_task_id = str(uuid.uuid4())
@@ -167,27 +172,31 @@ class CeleryDownloadQueueManager:
"name": task.get("name", "Duplicate Task"),
"artist": task.get("artist", ""),
"url": incoming_url,
"original_request": task.get("orig_request", task.get("original_request", {})),
"original_request": task.get(
"orig_request", task.get("original_request", {})
),
"created_at": time.time(),
"is_duplicate_error_task": True
"is_duplicate_error_task": True,
}
store_task_info(error_task_id, error_task_info_payload)
error_status_payload = {
"status": ProgressState.ERROR,
"error": message,
"existing_task_id": existing_task_id,
"existing_task_id": existing_task_id,
"timestamp": time.time(),
"type": error_task_info_payload["type"],
"name": error_task_info_payload["name"],
"artist": error_task_info_payload["artist"]
"artist": error_task_info_payload["artist"],
}
store_task_status(error_task_id, error_status_payload)
return error_task_id # Return the ID of this new error-state task
return error_task_id # Return the ID of this new error-state task
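# The duplicate test above, condensed into a sketch predicate;
# is_active_duplicate is illustrative, not part of the module.
def is_active_duplicate(existing_info, existing_status, incoming_url, incoming_type):
    return (
        existing_info.get("url") == incoming_url
        and existing_info.get("download_type") == incoming_type
        and existing_status not in NON_BLOCKING_STATES
    )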
task_id = str(uuid.uuid4())
config_params = get_config_params()
original_request = task.get("orig_request", task.get("original_request", {}))
original_request = task.get(
"orig_request", task.get("original_request", {})
)
complete_task = {
"download_type": incoming_type,
"type": task.get("type", incoming_type),
@@ -195,75 +204,107 @@ class CeleryDownloadQueueManager:
"artist": task.get("artist", ""),
"url": task.get("url", ""),
"retry_url": task.get("retry_url", ""),
"main": original_request.get("main", config_params['deezer']),
"fallback": original_request.get("fallback",
config_params['spotify'] if config_params['fallback'] else None),
"quality": original_request.get("quality", config_params['deezerQuality']),
"fall_quality": original_request.get("fall_quality", config_params['spotifyQuality']),
"real_time": self._parse_bool_param(original_request.get("real_time"), config_params['realTime']),
"custom_dir_format": original_request.get("custom_dir_format", config_params['customDirFormat']),
"custom_track_format": original_request.get("custom_track_format", config_params['customTrackFormat']),
"pad_tracks": self._parse_bool_param(original_request.get("tracknum_padding"), config_params['tracknum_padding']),
"save_cover": self._parse_bool_param(original_request.get("save_cover"), config_params['save_cover']),
"convertTo": original_request.get("convertTo", config_params.get('convertTo')),
"bitrate": original_request.get("bitrate", config_params.get('bitrate')),
"main": original_request.get("main", config_params["deezer"]),
"fallback": original_request.get(
"fallback",
config_params["spotify"] if config_params["fallback"] else None,
),
"quality": original_request.get(
"quality", config_params["deezerQuality"]
),
"fall_quality": original_request.get(
"fall_quality", config_params["spotifyQuality"]
),
"real_time": self._parse_bool_param(
original_request.get("real_time"), config_params["realTime"]
),
"custom_dir_format": original_request.get(
"custom_dir_format", config_params["customDirFormat"]
),
"custom_track_format": original_request.get(
"custom_track_format", config_params["customTrackFormat"]
),
"pad_tracks": self._parse_bool_param(
original_request.get("tracknum_padding"),
config_params["tracknum_padding"],
),
"save_cover": self._parse_bool_param(
original_request.get("save_cover"), config_params["save_cover"]
),
"convertTo": original_request.get(
"convertTo", config_params.get("convertTo")
),
"bitrate": original_request.get(
"bitrate", config_params.get("bitrate")
),
"retry_count": 0,
"original_request": original_request,
"created_at": time.time()
"created_at": time.time(),
}
# If from_watch_job is True, ensure track_details_for_db is passed through
if from_watch_job and "track_details_for_db" in task:
complete_task["track_details_for_db"] = task["track_details_for_db"]
store_task_info(task_id, complete_task)
store_task_status(task_id, {
"status": ProgressState.QUEUED,
"timestamp": time.time(),
"type": complete_task["type"],
"name": complete_task["name"],
"artist": complete_task["artist"],
"retry_count": 0,
"queue_position": len(get_all_tasks()) + 1
})
store_task_status(
task_id,
{
"status": ProgressState.QUEUED,
"timestamp": time.time(),
"type": complete_task["type"],
"name": complete_task["name"],
"artist": complete_task["artist"],
"retry_count": 0,
"queue_position": len(get_all_tasks()) + 1,
},
)
celery_task_map = {
"track": download_track,
"album": download_album,
"playlist": download_playlist
"playlist": download_playlist,
}
task_func = celery_task_map.get(incoming_type)
if task_func:
task_func.apply_async(
kwargs=complete_task,
task_id=task_id,
countdown=0 if not self.paused else 3600
countdown=0 if not self.paused else 3600,
)
logger.info(
f"Added {incoming_type} download task {task_id} to Celery queue."
)
logger.info(f"Added {incoming_type} download task {task_id} to Celery queue.")
return task_id
else:
store_task_status(task_id, {
"status": ProgressState.ERROR,
"message": f"Unsupported download type: {incoming_type}",
"timestamp": time.time()
})
store_task_status(
task_id,
{
"status": ProgressState.ERROR,
"message": f"Unsupported download type: {incoming_type}",
"timestamp": time.time(),
},
)
logger.error(f"Unsupported download type: {incoming_type}")
return task_id
except Exception as e:
logger.error(f"Error adding task to Celery queue: {e}", exc_info=True)
error_task_id = str(uuid.uuid4())
store_task_status(error_task_id, {
"status": ProgressState.ERROR,
"message": f"Error adding task to queue: {str(e)}",
"timestamp": time.time(),
"type": task.get("type", "unknown"),
"name": task.get("name", "Unknown"),
"artist": task.get("artist", "")
})
store_task_status(
error_task_id,
{
"status": ProgressState.ERROR,
"message": f"Error adding task to queue: {str(e)}",
"timestamp": time.time(),
"type": task.get("type", "unknown"),
"name": task.get("name", "Unknown"),
"artist": task.get("artist", ""),
},
)
return error_task_id
def _parse_bool_param(self, param_value, default_value=False):
"""Helper function to parse boolean parameters from string values"""
if param_value is None:
@@ -271,108 +312,111 @@ class CeleryDownloadQueueManager:
if isinstance(param_value, bool):
return param_value
if isinstance(param_value, str):
return param_value.lower() in ['true', '1', 'yes', 'y', 'on']
return param_value.lower() in ["true", "1", "yes", "y", "on"]
return bool(param_value)
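# Usage sketch: string flags from request args normalize to booleans,
# falling back to the config default when the value is absent, e.g.
# _parse_bool_param("yes") -> True, _parse_bool_param("0") -> False,
# _parse_bool_param(None, True) -> True.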
def cancel_task(self, task_id):
"""
Cancels a task by its ID.
Args:
task_id (str): The ID of the task to cancel
Returns:
dict: Status information about the cancellation
"""
return cancel_celery_task(task_id)
def retry_task(self, task_id):
"""
Retry a failed task.
Args:
task_id (str): The ID of the failed task to retry
Returns:
dict: Status information about the retry
"""
return retry_celery_task(task_id)
def cancel_all_tasks(self):
"""
Cancel all currently queued and running tasks.
Returns:
dict: Status information about the cancellation
"""
tasks = get_all_tasks()
cancelled_count = 0
for task in tasks:
task_id = task.get("task_id")
status = task.get("status")
# Only cancel tasks that are not already completed or cancelled
if status not in [ProgressState.COMPLETE, ProgressState.CANCELLED]:
result = cancel_celery_task(task_id)
if result.get("status") == "cancelled":
cancelled_count += 1
return {
"status": "all_cancelled",
"cancelled_count": cancelled_count,
"total_tasks": len(tasks)
"total_tasks": len(tasks),
}
def get_queue_status(self):
"""
Get the current status of the queue.
Returns:
dict: Status information about the queue
"""
tasks = get_all_tasks()
# Count tasks by status
running_count = 0
pending_count = 0
failed_count = 0
running_tasks = []
failed_tasks = []
for task in tasks:
status = task.get("status")
if status == ProgressState.PROCESSING:
running_count += 1
running_tasks.append({
"task_id": task.get("task_id"),
"name": task.get("name", "Unknown"),
"type": task.get("type", "unknown"),
"download_type": task.get("download_type", "unknown")
})
running_tasks.append(
{
"task_id": task.get("task_id"),
"name": task.get("name", "Unknown"),
"type": task.get("type", "unknown"),
"download_type": task.get("download_type", "unknown"),
}
)
elif status == ProgressState.QUEUED:
pending_count += 1
elif status == ProgressState.ERROR:
failed_count += 1
# Get task info for retry information
task_info = get_task_info(task.get("task_id"))
last_status = get_last_task_status(task.get("task_id"))
retry_count = 0
if last_status:
retry_count = last_status.get("retry_count", 0)
failed_tasks.append({
"task_id": task.get("task_id"),
"name": task.get("name", "Unknown"),
"type": task.get("type", "unknown"),
"download_type": task.get("download_type", "unknown"),
"retry_count": retry_count
})
failed_tasks.append(
{
"task_id": task.get("task_id"),
"name": task.get("name", "Unknown"),
"type": task.get("type", "unknown"),
"download_type": task.get("download_type", "unknown"),
"retry_count": retry_count,
}
)
return {
"running": running_count,
"pending": pending_count,
@@ -380,87 +424,85 @@ class CeleryDownloadQueueManager:
"max_concurrent": self.max_concurrent,
"paused": self.paused,
"running_tasks": running_tasks,
"failed_tasks": failed_tasks
"failed_tasks": failed_tasks,
}
def pause(self):
"""Pause processing of new tasks."""
self.paused = True
# Get all queued tasks
tasks = get_all_tasks()
for task in tasks:
if task.get("status") == ProgressState.QUEUED:
# Update status to indicate the task is paused
store_task_status(task.get("task_id"), {
"status": ProgressState.QUEUED,
"paused": True,
"message": "Queue is paused, task will run when queue is resumed",
"timestamp": time.time()
})
store_task_status(
task.get("task_id"),
{
"status": ProgressState.QUEUED,
"paused": True,
"message": "Queue is paused, task will run when queue is resumed",
"timestamp": time.time(),
},
)
logger.info("Download queue processing paused")
return {"status": "paused"}
def resume(self):
"""Resume processing of tasks."""
self.paused = False
# Get all queued tasks
tasks = get_all_tasks()
for task in tasks:
if task.get("status") == ProgressState.QUEUED:
task_id = task.get("task_id")
# Get the task info
task_info = get_task_info(task_id)
if not task_info:
continue
# Update status to indicate the task is no longer paused
store_task_status(task_id, {
"status": ProgressState.QUEUED,
"paused": False,
"message": "Queue resumed, task will run soon",
"timestamp": time.time()
})
store_task_status(
task_id,
{
"status": ProgressState.QUEUED,
"paused": False,
"message": "Queue resumed, task will run soon",
"timestamp": time.time(),
},
)
# Reschedule the task to run immediately
download_type = task_info.get("download_type", "unknown")
if download_type == "track":
download_track.apply_async(
kwargs=task_info,
task_id=task_id
)
download_track.apply_async(kwargs=task_info, task_id=task_id)
elif download_type == "album":
download_album.apply_async(
kwargs=task_info,
task_id=task_id
)
download_album.apply_async(kwargs=task_info, task_id=task_id)
elif download_type == "playlist":
download_playlist.apply_async(
kwargs=task_info,
task_id=task_id
)
download_playlist.apply_async(kwargs=task_info, task_id=task_id)
logger.info("Download queue processing resumed")
return {"status": "resumed"}
def start(self):
"""Start the queue manager (no-op for Celery implementation)."""
logger.info("Celery Download Queue Manager started")
return {"status": "started"}
def stop(self):
"""Stop the queue manager (graceful shutdown)."""
logger.info("Celery Download Queue Manager stopping...")
# Cancel all tasks or just let them finish?
# For now, we'll let them finish and just log the shutdown
logger.info("Celery Download Queue Manager stopped")
return {"status": "stopped"}
# Create the global instance
download_queue_manager = CeleryDownloadQueueManager()

View File

@@ -1,9 +1,7 @@
import time
import json
import uuid
import logging
import traceback
from datetime import datetime
from celery import Celery, Task, states
from celery.signals import (
task_prerun,
@@ -14,17 +12,13 @@ from celery.signals import (
setup_logging,
)
from celery.exceptions import Retry
import os # Added for path operations
from pathlib import Path # Added for path operations
# Configure logging
logger = logging.getLogger(__name__)
# Setup Redis and Celery
from routes.utils.celery_config import (
REDIS_URL,
REDIS_BACKEND,
REDIS_PASSWORD,
get_config_params,
)
@@ -37,6 +31,12 @@ from routes.utils.watch.db import (
# Import history manager function
from .history_manager import add_entry_to_history
# Create Redis connection for storing task data that's not part of the Celery result backend
import redis
# Configure logging
logger = logging.getLogger(__name__)
# Initialize Celery app
celery_app = Celery(
"routes.utils.celery_tasks", broker=REDIS_URL, backend=REDIS_BACKEND
@@ -45,8 +45,6 @@ celery_app = Celery(
# Load Celery config
celery_app.config_from_object("routes.utils.celery_config")
# Create Redis connection for storing task data that's not part of the Celery result backend
import redis
redis_client = redis.Redis.from_url(REDIS_URL)

View File

@@ -2,8 +2,7 @@ import json
from pathlib import Path
import shutil
import sqlite3
import traceback # For logging detailed error messages
import time  # For retry delays
import logging
# Assuming deezspot is in a location findable by Python's import system
@@ -11,38 +10,42 @@ import logging
# from deezspot.deezloader import DeeLogin # Used in validation
# For now, as per original, validation calls these directly.
logger = logging.getLogger(__name__)  # Assuming logger is configured elsewhere
# --- New Database and Path Definitions ---
CREDS_BASE_DIR = Path('./data/creds')
ACCOUNTS_DB_PATH = CREDS_BASE_DIR / 'accounts.db'
BLOBS_DIR = CREDS_BASE_DIR / 'blobs'
GLOBAL_SEARCH_JSON_PATH = CREDS_BASE_DIR / 'search.json' # Global Spotify API creds
CREDS_BASE_DIR = Path("./data/creds")
ACCOUNTS_DB_PATH = CREDS_BASE_DIR / "accounts.db"
BLOBS_DIR = CREDS_BASE_DIR / "blobs"
GLOBAL_SEARCH_JSON_PATH = CREDS_BASE_DIR / "search.json" # Global Spotify API creds
EXPECTED_SPOTIFY_TABLE_COLUMNS = {
"name": "TEXT PRIMARY KEY",
# client_id and client_secret are now global
"region": "TEXT", # ISO 3166-1 alpha-2
"region": "TEXT", # ISO 3166-1 alpha-2
"created_at": "REAL",
"updated_at": "REAL"
"updated_at": "REAL",
}
EXPECTED_DEEZER_TABLE_COLUMNS = {
"name": "TEXT PRIMARY KEY",
"arl": "TEXT",
"region": "TEXT", # ISO 3166-1 alpha-2
"region": "TEXT", # ISO 3166-1 alpha-2
"created_at": "REAL",
"updated_at": "REAL"
"updated_at": "REAL",
}
def _get_db_connection():
ACCOUNTS_DB_PATH.parent.mkdir(parents=True, exist_ok=True)
BLOBS_DIR.mkdir(parents=True, exist_ok=True)  # Ensure blobs directory also exists
conn = sqlite3.connect(ACCOUNTS_DB_PATH, timeout=10)
conn.row_factory = sqlite3.Row
return conn
def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_columns: dict):
def _ensure_table_schema(
cursor: sqlite3.Cursor, table_name: str, expected_columns: dict
):
"""Ensures the given table has all expected columns, adding them if necessary."""
try:
cursor.execute(f"PRAGMA table_info({table_name})")
@@ -53,17 +56,21 @@ def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_colum
for col_name, col_type in expected_columns.items():
if col_name not in existing_column_names:
# Basic protection against altering PK after creation if table is not empty
if 'PRIMARY KEY' in col_type.upper() and existing_columns_info:
if "PRIMARY KEY" in col_type.upper() and existing_columns_info:
logger.warning(
f"Column '{col_name}' is part of PRIMARY KEY for table '{table_name}' "
f"and was expected to be created by CREATE TABLE. Skipping explicit ADD COLUMN."
)
continue
col_type_for_add = col_type.replace(' PRIMARY KEY', '').strip()
col_type_for_add = col_type.replace(" PRIMARY KEY", "").strip()
try:
cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN {col_name} {col_type_for_add}")
logger.info(f"Added missing column '{col_name} {col_type_for_add}' to table '{table_name}'.")
cursor.execute(
f"ALTER TABLE {table_name} ADD COLUMN {col_name} {col_type_for_add}"
)
logger.info(
f"Added missing column '{col_name} {col_type_for_add}' to table '{table_name}'."
)
added_columns = True
except sqlite3.OperationalError as alter_e:
logger.warning(
@@ -72,9 +79,12 @@ def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_colum
)
return added_columns
except sqlite3.Error as e:
logger.error(f"Error ensuring schema for table '{table_name}': {e}", exc_info=True)
logger.error(
f"Error ensuring schema for table '{table_name}': {e}", exc_info=True
)
return False
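# A standalone sketch of the PRAGMA-based migration above, using an
# in-memory database; the "region" column mirrors the expected schemas.
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE spotify (name TEXT PRIMARY KEY)")
cur.execute("PRAGMA table_info(spotify)")
existing = {row[1] for row in cur.fetchall()}  # row[1] is the column name
if "region" not in existing:
    cur.execute("ALTER TABLE spotify ADD COLUMN region TEXT")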
def init_credentials_db():
"""Initializes the accounts.db and its tables if they don't exist."""
try:
@@ -90,7 +100,7 @@ def init_credentials_db():
)
""")
_ensure_table_schema(cursor, "spotify", EXPECTED_SPOTIFY_TABLE_COLUMNS)
# Deezer Table
cursor.execute("""
CREATE TABLE IF NOT EXISTS deezer (
@@ -102,49 +112,75 @@ def init_credentials_db():
)
""")
_ensure_table_schema(cursor, "deezer", EXPECTED_DEEZER_TABLE_COLUMNS)
# Ensure global search.json exists, create if not
if not GLOBAL_SEARCH_JSON_PATH.exists():
logger.info(f"Global Spotify search credential file not found at {GLOBAL_SEARCH_JSON_PATH}. Creating empty file.")
with open(GLOBAL_SEARCH_JSON_PATH, 'w') as f_search:
json.dump({"client_id": "", "client_secret": ""}, f_search, indent=4)
logger.info(
f"Global Spotify search credential file not found at {GLOBAL_SEARCH_JSON_PATH}. Creating empty file."
)
with open(GLOBAL_SEARCH_JSON_PATH, "w") as f_search:
json.dump(
{"client_id": "", "client_secret": ""}, f_search, indent=4
)
conn.commit()
logger.info(f"Credentials database initialized/schema checked at {ACCOUNTS_DB_PATH}")
logger.info(
f"Credentials database initialized/schema checked at {ACCOUNTS_DB_PATH}"
)
except sqlite3.Error as e:
logger.error(f"Error initializing credentials database: {e}", exc_info=True)
raise
def _get_global_spotify_api_creds():
"""Loads client_id and client_secret from the global search.json."""
if GLOBAL_SEARCH_JSON_PATH.exists():
try:
with open(GLOBAL_SEARCH_JSON_PATH, 'r') as f:
with open(GLOBAL_SEARCH_JSON_PATH, "r") as f:
search_data = json.load(f)
client_id = search_data.get('client_id')
client_secret = search_data.get('client_secret')
client_id = search_data.get("client_id")
client_secret = search_data.get("client_secret")
if client_id and client_secret:
return client_id, client_secret
else:
logger.warning(f"Global Spotify API credentials in {GLOBAL_SEARCH_JSON_PATH} are incomplete.")
logger.warning(
f"Global Spotify API credentials in {GLOBAL_SEARCH_JSON_PATH} are incomplete."
)
except Exception as e:
logger.error(f"Error reading global Spotify API credentials from {GLOBAL_SEARCH_JSON_PATH}: {e}", exc_info=True)
logger.error(
f"Error reading global Spotify API credentials from {GLOBAL_SEARCH_JSON_PATH}: {e}",
exc_info=True,
)
else:
logger.warning(f"Global Spotify API credential file {GLOBAL_SEARCH_JSON_PATH} not found.")
return None, None # Return None if file doesn't exist or creds are incomplete/invalid
logger.warning(
f"Global Spotify API credential file {GLOBAL_SEARCH_JSON_PATH} not found."
)
return (
None,
None,
) # Return None if file doesn't exist or creds are incomplete/invalid
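# Callers unpack the pair and treat (None, None) as "not configured", e.g.:
# client_id, client_secret = _get_global_spotify_api_creds()
# if not client_id or not client_secret:
#     ...  # warn and skip Spotify operations, as album.py does above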
def save_global_spotify_api_creds(client_id: str, client_secret: str):
"""Saves client_id and client_secret to the global search.json."""
try:
GLOBAL_SEARCH_JSON_PATH.parent.mkdir(parents=True, exist_ok=True)
with open(GLOBAL_SEARCH_JSON_PATH, "w") as f:
json.dump(
{"client_id": client_id, "client_secret": client_secret}, f, indent=4
)
logger.info(
f"Global Spotify API credentials saved to {GLOBAL_SEARCH_JSON_PATH}"
)
return True
except Exception as e:
logger.error(
f"Error saving global Spotify API credentials to {GLOBAL_SEARCH_JSON_PATH}: {e}",
exc_info=True,
)
return False
def _validate_with_retry(service_name, account_name, validation_data):
"""
Attempts to validate credentials with retries for connection errors.
@@ -153,59 +189,84 @@ def _validate_with_retry(service_name, account_name, validation_data):
Returns True if validated, raises ValueError if not.
"""
# Deezspot imports need to be available. Assuming they are.
from deezspot.spotloader import SpoLogin
from deezspot.deezloader import DeeLogin

max_retries = 3  # Reduced for brevity, was 5
last_exception = None
for attempt in range(max_retries):
try:
if service_name == "spotify":
# For Spotify, validation uses the account's blob and GLOBAL API creds
global_client_id, global_client_secret = _get_global_spotify_api_creds()
if not global_client_id or not global_client_secret:
raise ValueError(
"Global Spotify API client_id or client_secret not configured for validation."
)
blob_file_path = validation_data.get("blob_file_path")
if not blob_file_path or not Path(blob_file_path).exists():
raise ValueError(
f"Spotify blob file missing for validation of account {account_name}"
)
SpoLogin(
credentials_path=str(blob_file_path),
spotify_client_id=global_client_id,
spotify_client_secret=global_client_secret,
)
else:  # Deezer
arl = validation_data.get("arl")
if not arl:
raise ValueError("Missing 'arl' for Deezer validation.")
DeeLogin(arl=arl)
logger.info(f"{service_name.capitalize()} credentials for {account_name} validated successfully (attempt {attempt + 1}).")
logger.info(
f"{service_name.capitalize()} credentials for {account_name} validated successfully (attempt {attempt + 1})."
)
return True
except Exception as e:
last_exception = e
error_str = str(e).lower()
is_connection_error = (
"connection refused" in error_str or "connection error" in error_str or
"timeout" in error_str or "temporary failure in name resolution" in error_str or
"dns lookup failed" in error_str or "network is unreachable" in error_str or
"ssl handshake failed" in error_str or "connection reset by peer" in error_str
"connection refused" in error_str
or "connection error" in error_str
or "timeout" in error_str
or "temporary failure in name resolution" in error_str
or "dns lookup failed" in error_str
or "network is unreachable" in error_str
or "ssl handshake failed" in error_str
or "connection reset by peer" in error_str
)
if is_connection_error and attempt < max_retries - 1:
retry_delay = 2 + attempt
logger.warning(
f"Validation for {account_name} ({service_name}) failed (attempt {attempt + 1}) due to connection issue: {e}. Retrying in {retry_delay}s..."
)
time.sleep(retry_delay)
continue
else:
logger.error(f"Validation for {account_name} ({service_name}) failed on attempt {attempt + 1} (non-retryable or max retries).")
logger.error(
f"Validation for {account_name} ({service_name}) failed on attempt {attempt + 1} (non-retryable or max retries)."
)
break
if last_exception:
base_error_message = str(last_exception).splitlines()[-1]
detailed_error_message = f"Invalid {service_name} credentials for {account_name}. Verification failed: {base_error_message}"
if service_name == 'spotify' and "incorrect padding" in base_error_message.lower():
detailed_error_message += ". Hint: For Spotify, ensure the credentials blob content is correct."
if (
service_name == "spotify"
and "incorrect padding" in base_error_message.lower()
):
detailed_error_message += (
". Hint: For Spotify, ensure the credentials blob content is correct."
)
raise ValueError(detailed_error_message)
else:
raise ValueError(f"Invalid {service_name} credentials for {account_name}. Verification failed (unknown reason after retries).")
raise ValueError(
f"Invalid {service_name} credentials for {account_name}. Verification failed (unknown reason after retries)."
)
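# A minimal usage sketch for the helper above (account name and ARL value are
# placeholders; assumes the module's logger is configured):
#
# try:
#     _validate_with_retry("deezer", "my_deezer", {"arl": "<arl-cookie-value>"})
# except ValueError as exc:
#     logger.error(f"Credential validation failed: {exc}")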
def create_credential(service, name, data):
@@ -219,54 +280,67 @@ def create_credential(service, name, data):
Raises:
ValueError, FileExistsError
"""
if service not in ["spotify", "deezer"]:
raise ValueError("Service must be 'spotify' or 'deezer'")
if not name or not isinstance(name, str):
raise ValueError("Credential name must be a non-empty string.")
current_time = time.time()
with _get_db_connection() as conn:
cursor = conn.cursor()
conn.row_factory = sqlite3.Row
try:
if service == "spotify":
required_fields = {
"region",
"blob_content",
}  # client_id/secret are global
if not required_fields.issubset(data.keys()):
raise ValueError(
f"Missing fields for Spotify. Required: {required_fields}"
)
blob_path = BLOBS_DIR / name / "credentials.json"
validation_data = {
"blob_file_path": str(blob_path)
}  # Validation uses global API creds
blob_path.parent.mkdir(parents=True, exist_ok=True)
with open(blob_path, "w") as f_blob:
if isinstance(data["blob_content"], dict):
json.dump(data["blob_content"], f_blob, indent=4)
else:  # assume string
f_blob.write(data["blob_content"])
try:
_validate_with_retry("spotify", name, validation_data)
cursor.execute(
"INSERT INTO spotify (name, region, created_at, updated_at) VALUES (?, ?, ?, ?)",
(name, data["region"], current_time, current_time),
)
except Exception:
if blob_path.exists():
blob_path.unlink()  # Cleanup blob
if blob_path.parent.exists() and not any(
blob_path.parent.iterdir()
):
blob_path.parent.rmdir()
raise  # Re-raise validation or DB error
elif service == "deezer":
required_fields = {"arl", "region"}
if not required_fields.issubset(data.keys()):
raise ValueError(f"Missing fields for Deezer. Required: {required_fields}")
validation_data = {'arl': data['arl']}
_validate_with_retry('deezer', name, validation_data)
raise ValueError(
f"Missing fields for Deezer. Required: {required_fields}"
)
validation_data = {"arl": data["arl"]}
_validate_with_retry("deezer", name, validation_data)
cursor.execute(
"INSERT INTO deezer (name, arl, region, created_at, updated_at) VALUES (?, ?, ?, ?, ?)",
(name, data['arl'], data['region'], current_time, current_time)
(name, data["arl"], data["region"], current_time, current_time),
)
conn.commit()
logger.info(f"Credential '{name}' for {service} created successfully.")
@@ -274,7 +348,9 @@ def create_credential(service, name, data):
except sqlite3.IntegrityError:
raise FileExistsError(f"Credential '{name}' already exists for {service}.")
except Exception as e:
logger.error(f"Error creating credential {name} for {service}: {e}", exc_info=True)
logger.error(
f"Error creating credential {name} for {service}: {e}", exc_info=True
)
raise ValueError(f"Could not create credential: {e}")
@@ -285,12 +361,12 @@ def get_credential(service, name):
For Deezer, returns dict with name, arl, and region.
Raises FileNotFoundError if the credential does not exist.
"""
if service not in ["spotify", "deezer"]:
raise ValueError("Service must be 'spotify' or 'deezer'")
with _get_db_connection() as conn:
cursor = conn.cursor()
conn.row_factory = sqlite3.Row  # Ensure row_factory is set for this cursor
cursor.execute(f"SELECT * FROM {service} WHERE name = ?", (name,))
row = cursor.fetchone()
@@ -299,63 +375,72 @@ def get_credential(service, name):
data = dict(row)
if service == "spotify":
blob_file_path = BLOBS_DIR / name / "credentials.json"
data["blob_file_path"] = str(blob_file_path)  # Keep for internal use
try:
with open(blob_file_path, "r") as f_blob:
blob_data = json.load(f_blob)
data["blob_content"] = blob_data
except FileNotFoundError:
logger.warning(f"Spotify blob file not found for {name} at {blob_file_path} during get_credential.")
data['blob_content'] = None
logger.warning(
f"Spotify blob file not found for {name} at {blob_file_path} during get_credential."
)
data["blob_content"] = None
except json.JSONDecodeError:
logger.warning(f"Error decoding JSON from Spotify blob file for {name} at {blob_file_path}.")
data['blob_content'] = None
logger.warning(
f"Error decoding JSON from Spotify blob file for {name} at {blob_file_path}."
)
data["blob_content"] = None
except Exception as e:
logger.error(f"Unexpected error reading Spotify blob for {name}: {e}", exc_info=True)
data['blob_content'] = None
logger.error(
f"Unexpected error reading Spotify blob for {name}: {e}",
exc_info=True,
)
data["blob_content"] = None
cleaned_data = {
"name": data.get("name"),
"region": data.get("region"),
"blob_content": data.get("blob_content"),
}
return cleaned_data
elif service == "deezer":
cleaned_data = {
"name": data.get("name"),
"region": data.get("region"),
"arl": data.get("arl"),
}
return cleaned_data
# Fallback, should not be reached if service is spotify or deezer
return None
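# Hedged usage sketch (account name is a placeholder; raises FileNotFoundError
# if the credential does not exist):
#
# creds = get_credential("deezer", "my_deezer")
# arl = creds["arl"]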
def list_credentials(service):
if service not in ["spotify", "deezer"]:
raise ValueError("Service must be 'spotify' or 'deezer'")
with _get_db_connection() as conn:
cursor = conn.cursor()
conn.row_factory = sqlite3.Row
cursor.execute(f"SELECT name FROM {service}")
return [row["name"] for row in cursor.fetchall()]
def delete_credential(service, name):
if service not in ["spotify", "deezer"]:
raise ValueError("Service must be 'spotify' or 'deezer'")
with _get_db_connection() as conn:
cursor = conn.cursor()
conn.row_factory = sqlite3.Row
cursor.execute(f"DELETE FROM {service} WHERE name = ?", (name,))
if cursor.rowcount == 0:
raise FileNotFoundError(f"Credential '{name}' not found for {service}.")
if service == "spotify":
blob_dir = BLOBS_DIR / name
if blob_dir.exists():
shutil.rmtree(blob_dir)
@@ -363,6 +448,7 @@ def delete_credential(service, name):
logger.info(f"Credential '{name}' for {service} deleted.")
return {"status": "deleted", "service": service, "name": name}
def edit_credential(service, name, new_data):
"""
Edits an existing credential.
@@ -370,98 +456,125 @@ def edit_credential(service, name, new_data):
new_data for Deezer can include: arl, region.
Fields not in new_data remain unchanged.
"""
if service not in ["spotify", "deezer"]:
raise ValueError("Service must be 'spotify' or 'deezer'")
current_time = time.time()
# Fetch existing data first to preserve unchanged fields and for validation backup
try:
existing_cred = get_credential(
service, name
)  # This will raise FileNotFoundError if not found
except FileNotFoundError:
raise
except Exception as e:  # Catch other errors from get_credential
raise ValueError(f"Could not retrieve existing credential {name} for edit: {e}")
updated_fields = new_data.copy()
with _get_db_connection() as conn:
cursor = conn.cursor()
conn.row_factory = sqlite3.Row
if service == "spotify":
# Prepare data for DB update
db_update_data = {
"region": updated_fields.get("region", existing_cred["region"]),
"updated_at": current_time,
"name": name,  # for WHERE clause
}
blob_path = Path(existing_cred["blob_file_path"])  # Use path from existing
original_blob_content = None
if blob_path.exists():
with open(blob_path, "r") as f_orig_blob:
original_blob_content = f_orig_blob.read()
# If blob_content is being updated, write it temporarily for validation
if "blob_content" in updated_fields:
blob_path.parent.mkdir(parents=True, exist_ok=True)
with open(blob_path, "w") as f_new_blob:
if isinstance(updated_fields["blob_content"], dict):
json.dump(updated_fields["blob_content"], f_new_blob, indent=4)
else:
f_new_blob.write(updated_fields["blob_content"])
validation_data = {"blob_file_path": str(blob_path)}
try:
_validate_with_retry("spotify", name, validation_data)
set_clause = ", ".join(
[f"{key} = ?" for key in db_update_data if key != "name"]
)
values = [
db_update_data[key] for key in db_update_data if key != "name"
] + [name]
cursor.execute(
f"UPDATE spotify SET {set_clause} WHERE name = ?", tuple(values)
)
# If validation passed and blob was in new_data, it's already written.
# If blob_content was NOT in new_data, the existing blob (if any) remains.
except Exception:
# Revert blob if it was changed and validation failed
if (
"blob_content" in updated_fields
and original_blob_content is not None
):
with open(blob_path, "w") as f_revert_blob:
f_revert_blob.write(original_blob_content)
elif (
"blob_content" in updated_fields
and original_blob_content is None
and blob_path.exists()
):
# If new blob was written but there was no original to revert to, delete the new one.
blob_path.unlink()
raise  # Re-raise validation or DB error
elif service == "deezer":
db_update_data = {
"arl": updated_fields.get("arl", existing_cred["arl"]),
"region": updated_fields.get("region", existing_cred["region"]),
"updated_at": current_time,
"name": name,  # for WHERE clause
}
validation_data = {"arl": db_update_data["arl"]}
_validate_with_retry(
"deezer", name, validation_data
)  # Validation happens before DB write for Deezer
set_clause = ", ".join(
[f"{key} = ?" for key in db_update_data if key != "name"]
)
values = [
db_update_data[key] for key in db_update_data if key != "name"
] + [name]
cursor.execute(
f"UPDATE deezer SET {set_clause} WHERE name = ?", tuple(values)
)
if cursor.rowcount == 0:  # Should not happen if get_credential succeeded
raise FileNotFoundError(
f"Credential '{name}' for {service} disappeared during edit."
)
conn.commit()
logger.info(f"Credential '{name}' for {service} updated successfully.")
return {"status": "updated", "service": service, "name": name}
# --- Helper for credential file path (mainly for Spotify blob) ---
def get_spotify_blob_path(account_name: str) -> Path:
return BLOBS_DIR / account_name / "credentials.json"
# It's good practice to call init_credentials_db() when the app starts.
# This can be done in the main application setup. For now, defining it here.
# If this script is run directly for setup, you could add:
# if __name__ == '__main__':
# init_credentials_db()
# print("Credentials database initialized.")
# print("Credentials database initialized.")


@@ -1,8 +1,4 @@
#!/usr/bin/python3
from deezspot.easy_spoty import Spo
import json
from pathlib import Path
from routes.utils.celery_queue_manager import get_config_params
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds
@@ -13,37 +9,42 @@ import logging
# Initialize logger
logger = logging.getLogger(__name__)
# We'll rely on get_config_params() instead of directly loading the config file
def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None):
"""
Get info from Spotify API. Uses global client_id/secret from search.json.
The default Spotify account from main.json might still be relevant for other Spo settings or if Spo uses it.
Args:
spotify_id: The Spotify ID of the entity
spotify_type: The type of entity (track, album, playlist, artist, artist_discography, episode)
limit (int, optional): The maximum number of items to return. Only used if spotify_type is "artist_discography".
offset (int, optional): The index of the first item to return. Only used if spotify_type is "artist_discography".
Returns:
Dictionary with the entity information
"""
client_id, client_secret = _get_global_spotify_api_creds()
if not client_id or not client_secret:
raise ValueError("Global Spotify API client_id or client_secret not configured in ./data/creds/search.json.")
raise ValueError(
"Global Spotify API client_id or client_secret not configured in ./data/creds/search.json."
)
# Get config parameters including default Spotify account name
# This might still be useful if Spo uses the account name for other things (e.g. market/region if not passed explicitly)
# For now, we are just ensuring the API keys are set.
config_params = get_config_params()
main_spotify_account_name = config_params.get(
"spotify", ""
)  # Still good to know which account is 'default' contextually
if not main_spotify_account_name:
# This is less critical now that API keys are global, but could indicate a misconfiguration
# if other parts of Spo expect an account context.
print(f"WARN: No default Spotify account name configured in settings (main.json). API calls will use global keys.")
print(
"WARN: No default Spotify account name configured in settings (main.json). API calls will use global keys."
)
else:
# Optionally, one could load the specific account's region here if Spo.init or methods need it,
# but easy_spoty's Spo doesn't seem to take region directly in __init__.
@@ -51,16 +52,20 @@ def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None):
try:
# We call get_credential just to check if the account exists,
# not for client_id/secret anymore for Spo.__init__
get_credential("spotify", main_spotify_account_name)
except FileNotFoundError:
# This is a more serious warning if an account is expected to exist.
print(f"WARN: Default Spotify account '{main_spotify_account_name}' configured in main.json was not found in credentials database.")
print(
f"WARN: Default Spotify account '{main_spotify_account_name}' configured in main.json was not found in credentials database."
)
except Exception as e:
print(f"WARN: Error accessing default Spotify account '{main_spotify_account_name}': {e}")
print(
f"WARN: Error accessing default Spotify account '{main_spotify_account_name}': {e}"
)
# Initialize the Spotify client with GLOBAL credentials
Spo.__init__(client_id, client_secret)
if spotify_type == "track":
return Spo.get_track(spotify_id)
elif spotify_type == "album":
@@ -83,27 +88,30 @@ def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None):
else:
raise ValueError(f"Unsupported Spotify type: {spotify_type}")
def get_deezer_info(deezer_id, deezer_type, limit=None):
"""
Get info from Deezer API.
Args:
deezer_id: The Deezer ID of the entity.
deezer_type: The type of entity (track, album, playlist, artist, episode,
artist_top_tracks, artist_albums, artist_related,
artist_radio, artist_playlists).
limit (int, optional): The maximum number of items to return. Used for
artist_top_tracks, artist_albums, artist_playlists.
Deezer API methods usually have their own defaults (e.g., 25)
if limit is not provided or None is passed to them.
Returns:
Dictionary with the entity information.
Raises:
ValueError: If deezer_type is unsupported.
Various exceptions from DeezerAPI (NoDataApi, QuotaExceeded, requests.exceptions.RequestException, etc.)
"""
logger.debug(f"Fetching Deezer info for ID {deezer_id}, type {deezer_type}, limit {limit}")
logger.debug(
f"Fetching Deezer info for ID {deezer_id}, type {deezer_type}, limit {limit}"
)
# DeezerAPI uses class methods; its @classmethod __init__ handles setup.
# No specific ARL or account handling here as DeezerAPI seems to use general endpoints.
@@ -121,11 +129,11 @@ def get_deezer_info(deezer_id, deezer_type, limit=None):
elif deezer_type == "artist_top_tracks":
if limit is not None:
return DeezerAPI.get_artist_top_tracks(deezer_id, limit=limit)
return DeezerAPI.get_artist_top_tracks(deezer_id)  # Use API default limit
elif deezer_type == "artist_albums":  # Maps to get_artist_top_albums
if limit is not None:
return DeezerAPI.get_artist_top_albums(deezer_id, limit=limit)
return DeezerAPI.get_artist_top_albums(deezer_id)  # Use API default limit
elif deezer_type == "artist_related":
return DeezerAPI.get_artist_related(deezer_id)
elif deezer_type == "artist_radio":
@@ -133,7 +141,7 @@ def get_deezer_info(deezer_id, deezer_type, limit=None):
elif deezer_type == "artist_playlists":
if limit is not None:
return DeezerAPI.get_artist_top_playlists(deezer_id, limit=limit)
return DeezerAPI.get_artist_top_playlists(deezer_id)  # Use API default limit
else:
logger.error(f"Unsupported Deezer type: {deezer_type}")
raise ValueError(f"Unsupported Deezer type: {deezer_type}")


@@ -6,29 +6,30 @@ from pathlib import Path
logger = logging.getLogger(__name__)
HISTORY_DIR = Path("./data/history")
HISTORY_DB_FILE = HISTORY_DIR / "download_history.db"
EXPECTED_COLUMNS = {
"task_id": "TEXT PRIMARY KEY",
"download_type": "TEXT",
"item_name": "TEXT",
"item_artist": "TEXT",
"item_album": "TEXT",
"item_url": "TEXT",
"spotify_id": "TEXT",
"status_final": "TEXT",  # 'COMPLETED', 'ERROR', 'CANCELLED'
"error_message": "TEXT",
"timestamp_added": "REAL",
"timestamp_completed": "REAL",
"original_request_json": "TEXT",
"last_status_obj_json": "TEXT",
"service_used": "TEXT",
"quality_profile": "TEXT",
"convert_to": "TEXT",
"bitrate": "TEXT",
}
def init_history_db():
"""Initializes the download history database, creates the table if it doesn't exist,
and adds any missing columns to an existing table."""
@@ -42,7 +43,7 @@ def init_history_db():
# The primary key constraint is handled by the initial CREATE TABLE.
# If 'task_id' is missing, it cannot be added as PRIMARY KEY to an existing table
# without complex migrations. We assume 'task_id' will exist if the table exists.
create_table_sql = f"""
create_table_sql = """
CREATE TABLE IF NOT EXISTS download_history (
task_id TEXT PRIMARY KEY,
download_type TEXT,
@@ -74,42 +75,54 @@ def init_history_db():
added_columns = False
for col_name, col_type in EXPECTED_COLUMNS.items():
if col_name not in existing_column_names:
if "PRIMARY KEY" in col_type.upper() and col_name == "task_id":
# This case should be handled by CREATE TABLE, but as a safeguard:
# If task_id is somehow missing and table exists, this is a problem.
# Adding it as PK here is complex and might fail if data exists.
# For now, we assume CREATE TABLE handles the PK.
# If we were to add it, it would be 'ALTER TABLE download_history ADD COLUMN task_id TEXT;'
# and then potentially a separate step to make it PK if table is empty, which is non-trivial.
logger.warning(f"Column '{col_name}' is part of PRIMARY KEY and was expected to be created by CREATE TABLE. Skipping explicit ADD COLUMN.")
logger.warning(
f"Column '{col_name}' is part of PRIMARY KEY and was expected to be created by CREATE TABLE. Skipping explicit ADD COLUMN."
)
continue
# For other columns, just add them.
# Remove PRIMARY KEY from type definition if present, as it's only for table creation.
col_type_for_add = col_type.replace(" PRIMARY KEY", "").strip()
try:
cursor.execute(f"ALTER TABLE download_history ADD COLUMN {col_name} {col_type_for_add}")
logger.info(f"Added missing column '{col_name} {col_type_for_add}' to download_history table.")
cursor.execute(
f"ALTER TABLE download_history ADD COLUMN {col_name} {col_type_for_add}"
)
logger.info(
f"Added missing column '{col_name} {col_type_for_add}' to download_history table."
)
added_columns = True
except sqlite3.OperationalError as alter_e:
# This might happen if a column (e.g. task_id) without "PRIMARY KEY" is added by this loop
# but the initial create table already made it a primary key.
# Or other more complex scenarios.
logger.warning(f"Could not add column '{col_name}': {alter_e}. It might already exist or there's a schema mismatch.")
logger.warning(
f"Could not add column '{col_name}': {alter_e}. It might already exist or there's a schema mismatch."
)
if added_columns:
conn.commit()
logger.info(f"Download history table schema updated at {HISTORY_DB_FILE}")
else:
logger.info(f"Download history database schema is up-to-date at {HISTORY_DB_FILE}")
logger.info(
f"Download history database schema is up-to-date at {HISTORY_DB_FILE}"
)
except sqlite3.Error as e:
logger.error(f"Error initializing download history database: {e}", exc_info=True)
logger.error(
f"Error initializing download history database: {e}", exc_info=True
)
finally:
if conn:
conn.close()
def add_entry_to_history(history_data: dict):
"""Adds or replaces an entry in the download_history table.
@@ -118,11 +131,23 @@ def add_entry_to_history(history_data: dict):
Expected keys match the table columns.
"""
required_keys = [
"task_id",
"download_type",
"item_name",
"item_artist",
"item_album",
"item_url",
"spotify_id",
"status_final",
"error_message",
"timestamp_added",
"timestamp_completed",
"original_request_json",
"last_status_obj_json",
"service_used",
"quality_profile",
"convert_to",
"bitrate",
]
# Ensure all keys are present, filling with None if not
for key in required_keys:
@@ -132,7 +157,8 @@ def add_entry_to_history(history_data: dict):
try:
conn = sqlite3.connect(HISTORY_DB_FILE)
cursor = conn.cursor()
cursor.execute("""
cursor.execute(
"""
INSERT OR REPLACE INTO download_history (
task_id, download_type, item_name, item_artist, item_album,
item_url, spotify_id, status_final, error_message,
@@ -140,26 +166,49 @@ def add_entry_to_history(history_data: dict):
last_status_obj_json, service_used, quality_profile,
convert_to, bitrate
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""", (
history_data['task_id'], history_data['download_type'], history_data['item_name'],
history_data['item_artist'], history_data['item_album'], history_data['item_url'],
history_data['spotify_id'], history_data['status_final'], history_data['error_message'],
history_data['timestamp_added'], history_data['timestamp_completed'],
history_data['original_request_json'], history_data['last_status_obj_json'],
history_data['service_used'], history_data['quality_profile'],
history_data['convert_to'], history_data['bitrate']
))
""",
(
history_data["task_id"],
history_data["download_type"],
history_data["item_name"],
history_data["item_artist"],
history_data["item_album"],
history_data["item_url"],
history_data["spotify_id"],
history_data["status_final"],
history_data["error_message"],
history_data["timestamp_added"],
history_data["timestamp_completed"],
history_data["original_request_json"],
history_data["last_status_obj_json"],
history_data["service_used"],
history_data["quality_profile"],
history_data["convert_to"],
history_data["bitrate"],
),
)
conn.commit()
logger.info(f"Added/Updated history for task_id: {history_data['task_id']}, status: {history_data['status_final']}")
logger.info(
f"Added/Updated history for task_id: {history_data['task_id']}, status: {history_data['status_final']}"
)
except sqlite3.Error as e:
logger.error(f"Error adding entry to download history for task_id {history_data.get('task_id')}: {e}", exc_info=True)
logger.error(
f"Error adding entry to download history for task_id {history_data.get('task_id')}: {e}",
exc_info=True,
)
except Exception as e:
logger.error(f"Unexpected error adding to history for task_id {history_data.get('task_id')}: {e}", exc_info=True)
logger.error(
f"Unexpected error adding to history for task_id {history_data.get('task_id')}: {e}",
exc_info=True,
)
finally:
if conn:
conn.close()
def get_history_entries(
limit=25, offset=0, sort_by="timestamp_completed", sort_order="DESC", filters=None
):
"""Retrieves entries from the download_history table with pagination, sorting, and filtering.
Args:
@@ -189,10 +238,10 @@ def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_
if filters:
for column, value in filters.items():
# Basic security: ensure column is a valid one (alphanumeric + underscore)
if column.replace("_", "").isalnum():
where_clauses.append(f"{column} = ?")
params.append(value)
if where_clauses:
where_sql = " WHERE " + " AND ".join(where_clauses)
count_query += where_sql
@@ -204,23 +253,33 @@ def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_
# Validate sort_by and sort_order to prevent SQL injection
valid_sort_columns = [
"task_id",
"download_type",
"item_name",
"item_artist",
"item_album",
"item_url",
"status_final",
"timestamp_added",
"timestamp_completed",
"service_used",
"quality_profile",
"convert_to",
"bitrate",
]
if sort_by not in valid_sort_columns:
sort_by = "timestamp_completed"  # Default sort
sort_order_upper = sort_order.upper()
if sort_order_upper not in ["ASC", "DESC"]:
sort_order_upper = "DESC"
select_query += f" ORDER BY {sort_by} {sort_order_upper} LIMIT ? OFFSET ?"
params.extend([limit, offset])
cursor.execute(select_query, params)
rows = cursor.fetchall()
# Convert rows to list of dicts
entries = [dict(row) for row in rows]
return entries, total_count
@@ -232,72 +291,79 @@ def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_
if conn:
conn.close()
if __name__ == "__main__":
# For testing purposes
logging.basicConfig(level=logging.INFO)
init_history_db()
sample_data_complete = {
"task_id": "test_task_123",
"download_type": "track",
"item_name": "Test Song",
"item_artist": "Test Artist",
"item_album": "Test Album",
"item_url": "http://spotify.com/track/123",
"spotify_id": "123",
"status_final": "COMPLETED",
"error_message": None,
"timestamp_added": time.time() - 3600,
"timestamp_completed": time.time(),
"original_request_json": json.dumps({"param1": "value1"}),
"last_status_obj_json": json.dumps(
{"status": "complete", "message": "Finished!"}
),
"service_used": "Spotify (Primary)",
"quality_profile": "NORMAL",
"convert_to": None,
"bitrate": None,
}
add_entry_to_history(sample_data_complete)
sample_data_error = {
"task_id": "test_task_456",
"download_type": "album",
"item_name": "Another Album",
"item_artist": "Another Artist",
"item_album": "Another Album",  # For albums, item_name and item_album are often the same
"item_url": "http://spotify.com/album/456",
"spotify_id": "456",
"status_final": "ERROR",
"error_message": "Download failed due to network issue.",
"timestamp_added": time.time() - 7200,
"timestamp_completed": time.time() - 60,
"original_request_json": json.dumps({"param2": "value2"}),
"last_status_obj_json": json.dumps(
{"status": "error", "error": "Network issue"}
),
"service_used": "Deezer",
"quality_profile": "MP3_320",
"convert_to": "mp3",
"bitrate": "320",
}
add_entry_to_history(sample_data_error)
# Test updating an entry
updated_data_complete = {
"task_id": "test_task_123",
"download_type": "track",
"item_name": "Test Song (Updated)",
"item_artist": "Test Artist",
"item_album": "Test Album II",
"item_url": "http://spotify.com/track/123",
"spotify_id": "123",
"status_final": "COMPLETED",
"error_message": None,
"timestamp_added": time.time() - 3600,
"timestamp_completed": time.time() + 100,  # Updated completion time
"original_request_json": json.dumps({"param1": "value1", "new_param": "added"}),
"last_status_obj_json": json.dumps(
{"status": "complete", "message": "Finished! With update."}
),
"service_used": "Spotify (Deezer Fallback)",
"quality_profile": "HIGH",
"convert_to": "flac",
"bitrate": None,
}
add_entry_to_history(updated_data_complete)
@@ -310,13 +376,17 @@ if __name__ == '__main__':
print(entry)
print("\nFetching history entries (sorted by item_name ASC, limit 2, offset 1):")
entries_sorted, total_sorted = get_history_entries(
limit=2, offset=1, sort_by="item_name", sort_order="ASC"
)
print(f"Total entries (should be same as above): {total_sorted}")
for entry in entries_sorted:
print(entry)
print("\nFetching history entries with filter (status_final = COMPLETED):")
entries_filtered, total_filtered = get_history_entries(
filters={"status_final": "COMPLETED"}
)
print(f"Total COMPLETED entries: {total_filtered}")
for entry in entries_filtered:
print(entry)


@@ -1,11 +1,9 @@
import os
import json
import traceback
from deezspot.spotloader import SpoLogin
from deezspot.deezloader import DeeLogin
from pathlib import Path
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds
from routes.utils.celery_config import get_config_params
def download_playlist(
url,
@@ -23,56 +21,68 @@ def download_playlist(
max_retries=3,
progress_callback=None,
convert_to=None,
bitrate=None,
):
try:
# Detect URL source (Spotify or Deezer) from URL
is_spotify_url = "open.spotify.com" in url.lower()
is_deezer_url = "deezer.com" in url.lower()
service = ""
if is_spotify_url:
service = "spotify"
elif is_deezer_url:
service = "deezer"
else:
error_msg = "Invalid URL: Must be from open.spotify.com or deezer.com"
print(f"ERROR: {error_msg}")
raise ValueError(error_msg)
print(f"DEBUG: playlist.py - Service determined from URL: {service}")
print(f"DEBUG: playlist.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'")
print(
f"DEBUG: playlist.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'"
)
# Get global Spotify API credentials
global_spotify_client_id, global_spotify_client_secret = _get_global_spotify_api_creds()
global_spotify_client_id, global_spotify_client_secret = (
_get_global_spotify_api_creds()
)
if not global_spotify_client_id or not global_spotify_client_secret:
warning_msg = "WARN: playlist.py - Global Spotify client_id/secret not found in search.json. Spotify operations will likely fail."
print(warning_msg)
if service == "spotify":
if fallback:  # Fallback is a Deezer account name for a Spotify URL
if quality is None:
quality = "FLAC"  # Deezer quality for first attempt
if fall_quality is None:
fall_quality = (
"HIGH"  # Spotify quality for fallback (if Deezer fails)
)
deezer_error = None
try:
# Attempt 1: Deezer via download_playlistspo (using 'fallback' as Deezer account name)
print(
f"DEBUG: playlist.py - Spotify URL. Attempt 1: Deezer (account: {fallback})"
)
deezer_fallback_creds = get_credential("deezer", fallback)
arl = deezer_fallback_creds.get("arl")
if not arl:
raise ValueError(
f"ARL not found for Deezer account '{fallback}'."
)
dl = DeeLogin(
arl=arl,
spotify_client_id=global_spotify_client_id,
spotify_client_secret=global_spotify_client_secret,
progress_callback=progress_callback,
)
dl.download_playlistspo(
link_playlist=url,  # Spotify URL
output_dir="./downloads",
quality_download=quality,  # Deezer quality
recursive_quality=True,
recursive_download=False,
not_interface=False,
@@ -85,35 +95,50 @@ def download_playlist(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate,
)
print(
f"DEBUG: playlist.py - Playlist download via Deezer (account: {fallback}) successful for Spotify URL."
)
except Exception as e:
deezer_error = e
print(
f"ERROR: playlist.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}"
)
traceback.print_exc()
print(
f"DEBUG: playlist.py - Attempting Spotify direct download (account: {main} for blob)..."
)
# Attempt 2: Spotify direct via download_playlist (using 'main' as Spotify account for blob)
try:
if (
not global_spotify_client_id
or not global_spotify_client_secret
):
raise ValueError(
"Global Spotify API credentials (client_id/secret) not configured for Spotify download."
)
spotify_main_creds = get_credential(
"spotify", main
)  # For blob path
blob_file_path = spotify_main_creds.get("blob_file_path")
if not Path(blob_file_path).exists():
raise FileNotFoundError(
f"Spotify credentials blob file not found at {blob_file_path} for account '{main}'"
)
spo = SpoLogin(
credentials_path=blob_file_path,
spotify_client_id=global_spotify_client_id,
spotify_client_secret=global_spotify_client_secret,
progress_callback=progress_callback,
)
spo.download_playlist(
link_playlist=url,  # Spotify URL
output_dir="./downloads",
quality_download=fall_quality,  # Spotify quality
recursive_quality=True,
recursive_download=False,
not_interface=False,
@@ -127,38 +152,49 @@ def download_playlist(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate,
)
print(
f"DEBUG: playlist.py - Spotify direct download (account: {main} for blob) successful."
)
except Exception as e2:
print(
f"ERROR: playlist.py - Spotify direct download (account: {main} for blob) also failed: {e2}"
)
raise RuntimeError(
f"Both Deezer attempt (account: {fallback}) and Spotify direct (account: {main} for blob) failed. "
f"Deezer error: {deezer_error}, Spotify error: {e2}"
) from e2
else:
# Spotify URL, no fallback. Direct Spotify download using 'main' (Spotify account for blob)
if quality is None:
quality = "HIGH"  # Default Spotify quality
print(
f"DEBUG: playlist.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}"
)
if not global_spotify_client_id or not global_spotify_client_secret:
raise ValueError(
"Global Spotify API credentials (client_id/secret) not configured for Spotify download."
)
spotify_main_creds = get_credential("spotify", main)  # For blob path
blob_file_path = spotify_main_creds.get("blob_file_path")
if not Path(blob_file_path).exists():
raise FileNotFoundError(
f"Spotify credentials blob file not found at {blob_file_path} for account '{main}'"
)
spo = SpoLogin(
credentials_path=blob_file_path,
spotify_client_id=global_spotify_client_id,
spotify_client_secret=global_spotify_client_secret,
progress_callback=progress_callback,
)
spo.download_playlist(
link_playlist=url,
output_dir="./downloads",
quality_download=quality,
recursive_quality=True,
recursive_download=False,
not_interface=False,
@@ -172,30 +208,35 @@ def download_playlist(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate,
)
print(
f"DEBUG: playlist.py - Direct Spotify download (account: {main} for blob) successful."
)
elif service == "deezer":
# Deezer URL. Direct Deezer download using 'main' (Deezer account name for ARL)
if quality is None:
quality = "FLAC"  # Default Deezer quality
print(
f"DEBUG: playlist.py - Deezer URL. Direct download with Deezer account: {main}"
)
deezer_main_creds = get_credential("deezer", main)  # For ARL
arl = deezer_main_creds.get("arl")
if not arl:
raise ValueError(f"ARL not found for Deezer account '{main}'.")
dl = DeeLogin(
arl=arl,  # Account specific ARL
spotify_client_id=global_spotify_client_id,  # Global Spotify keys
spotify_client_secret=global_spotify_client_secret,  # Global Spotify keys
progress_callback=progress_callback,
)
dl.download_playlistdee(  # Deezer URL, download via Deezer
link_playlist=url,
output_dir="./downloads",
quality_download=quality,
recursive_quality=False,  # Usually False for playlists to get individual track qualities
recursive_download=False,
make_zip=False,
custom_dir_format=custom_dir_format,
@@ -206,9 +247,11 @@ def download_playlist(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate,
)
print(
f"DEBUG: playlist.py - Direct Deezer download (account: {main}) successful."
)
else:
# Should be caught by initial service check, but as a safeguard
raise ValueError(f"Unsupported service determined: {service}")


@@ -1,50 +1,58 @@
from deezspot.easy_spoty import Spo
import json
from pathlib import Path
import logging
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds
# Configure logger
logger = logging.getLogger(__name__)
def search(query: str, search_type: str, limit: int = 3, main: str = None) -> dict:
logger.info(
f"Search requested: query='{query}', type={search_type}, limit={limit}, main_account_name={main}"
)
client_id, client_secret = _get_global_spotify_api_creds()
if not client_id or not client_secret:
logger.error("Global Spotify API client_id or client_secret not configured in ./data/creds/search.json.")
raise ValueError("Spotify API credentials are not configured globally for search.")
logger.error(
"Global Spotify API client_id or client_secret not configured in ./data/creds/search.json."
)
raise ValueError(
"Spotify API credentials are not configured globally for search."
)
if main:
logger.debug(f"Spotify account context '{main}' was provided for search. API keys are global, but this account might be used for other context by Spo if relevant.")
logger.debug(
f"Spotify account context '{main}' was provided for search. API keys are global, but this account might be used for other context by Spo if relevant."
)
try:
get_credential('spotify', main)
get_credential("spotify", main)
logger.debug(f"Spotify account '{main}' exists.")
except FileNotFoundError:
logger.warning(f"Spotify account '{main}' provided for search context not found in credentials. Search will proceed with global API keys.")
logger.warning(
f"Spotify account '{main}' provided for search context not found in credentials. Search will proceed with global API keys."
)
except Exception as e:
logger.warning(f"Error checking existence of Spotify account '{main}': {e}. Search will proceed with global API keys.")
logger.warning(
f"Error checking existence of Spotify account '{main}': {e}. Search will proceed with global API keys."
)
else:
logger.debug("No specific 'main' account context provided for search. Using global API keys.")
logger.debug(f"Initializing Spotify client with global API credentials for search.")
logger.debug(
"No specific 'main' account context provided for search. Using global API keys."
)
logger.debug("Initializing Spotify client with global API credentials for search.")
Spo.__init__(client_id, client_secret)
logger.debug(f"Executing Spotify search with query='{query}', type={search_type}, limit={limit}")
logger.debug(
f"Executing Spotify search with query='{query}', type={search_type}, limit={limit}"
)
try:
spotify_response = Spo.search(query=query, search_type=search_type, limit=limit)
logger.info(f"Search completed successfully for query: '{query}'")
return spotify_response
except Exception as e:
logger.error(f"Error during Spotify search for query '{query}': {e}", exc_info=True)
logger.error(
f"Error during Spotify search for query '{query}': {e}", exc_info=True
)
raise
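# Hedged usage sketch (query and type are illustrative; assumes the global
# API creds in search.json are configured):
#
# results = search("daft punk", "track", limit=5)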


@@ -1,11 +1,12 @@
import os
import json
import traceback
from deezspot.spotloader import SpoLogin
from deezspot.deezloader import DeeLogin
from pathlib import Path
from routes.utils.celery_config import get_config_params
from routes.utils.credentials import (
get_credential,
_get_global_spotify_api_creds,
get_spotify_blob_path,
)
def download_track(
url,
@@ -23,28 +24,32 @@ def download_track(
max_retries=3,
progress_callback=None,
convert_to=None,
bitrate=None,
):
try:
# Detect URL source (Spotify or Deezer) from URL
is_spotify_url = "open.spotify.com" in url.lower()
is_deezer_url = "deezer.com" in url.lower()
service = ""
if is_spotify_url:
service = "spotify"
elif is_deezer_url:
service = "deezer"
else:
error_msg = "Invalid URL: Must be from open.spotify.com or deezer.com"
print(f"ERROR: {error_msg}")
raise ValueError(error_msg)
print(f"DEBUG: track.py - Service determined from URL: {service}")
print(f"DEBUG: track.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'")
print(
f"DEBUG: track.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'"
)
# Get global Spotify API credentials for SpoLogin and DeeLogin (if it uses Spotify search)
global_spotify_client_id, global_spotify_client_secret = _get_global_spotify_api_creds()
global_spotify_client_id, global_spotify_client_secret = (
_get_global_spotify_api_creds()
)
if not global_spotify_client_id or not global_spotify_client_secret:
# This is a critical failure if Spotify operations are involved
warning_msg = "WARN: track.py - Global Spotify client_id/secret not found in search.json. Spotify operations will likely fail."
@@ -52,31 +57,39 @@ def download_track(
# Depending on flow, might want to raise error here if service is 'spotify'
# For now, let it proceed and fail at SpoLogin/DeeLogin init if keys are truly needed and missing.
if service == "spotify":
if fallback:  # Fallback is a Deezer account name for a Spotify URL
if quality is None:
quality = "FLAC"  # Deezer quality for first attempt
if fall_quality is None:
fall_quality = (
"HIGH"  # Spotify quality for fallback (if Deezer fails)
)
deezer_error = None
try:
# Attempt 1: Deezer via download_trackspo (using 'fallback' as Deezer account name)
print(
f"DEBUG: track.py - Spotify URL. Attempt 1: Deezer (account: {fallback})"
)
deezer_fallback_creds = get_credential("deezer", fallback)
arl = deezer_fallback_creds.get("arl")
if not arl:
raise ValueError(
f"ARL not found for Deezer account '{fallback}'."
)
dl = DeeLogin(
arl=arl,
spotify_client_id=global_spotify_client_id, # Global creds
spotify_client_secret=global_spotify_client_secret, # Global creds
progress_callback=progress_callback
spotify_client_id=global_spotify_client_id, # Global creds
spotify_client_secret=global_spotify_client_secret, # Global creds
progress_callback=progress_callback,
)
# download_trackspo means: Spotify URL, download via Deezer
dl.download_trackspo(
link_track=url,  # Spotify URL
output_dir="./downloads",
quality_download=quality,  # Deezer quality
recursive_quality=False,
recursive_download=False,
not_interface=False,
@@ -87,35 +100,52 @@ def download_track(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate
bitrate=bitrate,
)
print(
f"DEBUG: track.py - Track download via Deezer (account: {fallback}) successful for Spotify URL."
)
print(f"DEBUG: track.py - Track download via Deezer (account: {fallback}) successful for Spotify URL.")
except Exception as e:
deezer_error = e
print(f"ERROR: track.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}")
print(
f"ERROR: track.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}"
)
traceback.print_exc()
print(f"DEBUG: track.py - Attempting Spotify direct download (account: {main})...")
print(
f"DEBUG: track.py - Attempting Spotify direct download (account: {main})..."
)
# Attempt 2: Spotify direct via download_track (using 'main' as Spotify account for blob)
try:
if not global_spotify_client_id or not global_spotify_client_secret:
raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.")
if (
not global_spotify_client_id
or not global_spotify_client_secret
):
raise ValueError(
"Global Spotify API credentials (client_id/secret) not configured for Spotify download."
)
# Use get_spotify_blob_path directly
blob_file_path = get_spotify_blob_path(main)
if not blob_file_path.exists(): # Check existence on the Path object
raise FileNotFoundError(f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'")
blob_file_path = get_spotify_blob_path(main)
if (
not blob_file_path.exists()
): # Check existence on the Path object
raise FileNotFoundError(
f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'"
)
spo = SpoLogin(
credentials_path=str(blob_file_path), # Account specific blob
spotify_client_id=global_spotify_client_id, # Global API keys
spotify_client_secret=global_spotify_client_secret, # Global API keys
progress_callback=progress_callback
credentials_path=str(
blob_file_path
), # Account specific blob
spotify_client_id=global_spotify_client_id, # Global API keys
spotify_client_secret=global_spotify_client_secret, # Global API keys
progress_callback=progress_callback,
)
spo.download_track(
link_track=url,  # Spotify URL
output_dir="./downloads",
quality_download=fall_quality,  # Spotify quality
recursive_quality=False,
recursive_download=False,
not_interface=False,
@@ -128,38 +158,49 @@ def download_track(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate
bitrate=bitrate,
)
print(
f"DEBUG: track.py - Spotify direct download (account: {main} for blob) successful."
)
print(f"DEBUG: track.py - Spotify direct download (account: {main} for blob) successful.")
except Exception as e2:
print(f"ERROR: track.py - Spotify direct download (account: {main} for blob) also failed: {e2}")
print(
f"ERROR: track.py - Spotify direct download (account: {main} for blob) also failed: {e2}"
)
raise RuntimeError(
f"Both Deezer attempt (account: {fallback}) and Spotify direct (account: {main} for blob) failed. "
f"Deezer error: {deezer_error}, Spotify error: {e2}"
) from e2
else:
# Spotify URL, no fallback. Direct Spotify download using 'main' (Spotify account for blob)
if quality is None: quality = 'HIGH' # Default Spotify quality
print(f"DEBUG: track.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}")
if quality is None:
quality = "HIGH" # Default Spotify quality
print(
f"DEBUG: track.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}"
)
if not global_spotify_client_id or not global_spotify_client_secret:
raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.")
raise ValueError(
"Global Spotify API credentials (client_id/secret) not configured for Spotify download."
)
# Use get_spotify_blob_path directly
blob_file_path = get_spotify_blob_path(main)
if not blob_file_path.exists(): # Check existence on the Path object
raise FileNotFoundError(f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'")
if not blob_file_path.exists(): # Check existence on the Path object
raise FileNotFoundError(
f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'"
)
spo = SpoLogin(
credentials_path=str(blob_file_path), # Account specific blob
spotify_client_id=global_spotify_client_id, # Global API keys
spotify_client_secret=global_spotify_client_secret, # Global API keys
progress_callback=progress_callback
credentials_path=str(blob_file_path), # Account specific blob
spotify_client_id=global_spotify_client_id, # Global API keys
spotify_client_secret=global_spotify_client_secret, # Global API keys
progress_callback=progress_callback,
)
spo.download_track(
link_track=url,
output_dir="./downloads",
quality_download=quality,
recursive_quality=False,
recursive_download=False,
not_interface=False,
@@ -172,26 +213,31 @@ def download_track(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate
bitrate=bitrate,
)
print(f"DEBUG: track.py - Direct Spotify download (account: {main} for blob) successful.")
elif service == 'deezer':
print(
f"DEBUG: track.py - Direct Spotify download (account: {main} for blob) successful."
)
elif service == "deezer":
# Deezer URL. Direct Deezer download using 'main' (Deezer account name for ARL)
if quality is None: quality = 'FLAC' # Default Deezer quality
print(f"DEBUG: track.py - Deezer URL. Direct download with Deezer account: {main}")
deezer_main_creds = get_credential('deezer', main) # For ARL
arl = deezer_main_creds.get('arl')
if quality is None:
quality = "FLAC" # Default Deezer quality
print(
f"DEBUG: track.py - Deezer URL. Direct download with Deezer account: {main}"
)
deezer_main_creds = get_credential("deezer", main) # For ARL
arl = deezer_main_creds.get("arl")
if not arl:
raise ValueError(f"ARL not found for Deezer account '{main}'.")
dl = DeeLogin(
arl=arl, # Account specific ARL
spotify_client_id=global_spotify_client_id, # Global Spotify keys for internal Spo use by DeeLogin
spotify_client_secret=global_spotify_client_secret, # Global Spotify keys
progress_callback=progress_callback
arl=arl, # Account specific ARL
spotify_client_id=global_spotify_client_id, # Global Spotify keys for internal Spo use by DeeLogin
spotify_client_secret=global_spotify_client_secret, # Global Spotify keys
progress_callback=progress_callback,
)
dl.download_trackdee(  # Deezer URL, download via Deezer
link_track=url,
output_dir="./downloads",
quality_download=quality,
@@ -205,12 +251,14 @@ def download_track(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate
bitrate=bitrate,
)
print(
f"DEBUG: track.py - Direct Deezer download (account: {main}) successful."
)
print(f"DEBUG: track.py - Direct Deezer download (account: {main}) successful.")
else:
# Should be caught by initial service check, but as a safeguard
raise ValueError(f"Unsupported service determined: {service}")
except Exception as e:
except Exception:
traceback.print_exc()
raise
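Stripped of the service-specific details, the whole fallback flow above reduces to one pattern: try the Deezer path first, and only on failure fall back to direct Spotify, surfacing both errors if everything fails. A minimal sketch, where primary and secondary are hypothetical stand-ins for the DeeLogin and SpoLogin calls:
def download_with_fallback(url, primary, secondary):
    # Try the preferred downloader first; keep its error so it can be
    # reported alongside the fallback's error if both attempts fail.
    try:
        return primary(url)
    except Exception as primary_error:
        try:
            return secondary(url)
        except Exception as secondary_error:
            raise RuntimeError(
                f"Both attempts failed. Primary: {primary_error}, "
                f"Secondary: {secondary_error}"
            ) from secondary_error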

File diff suppressed because it is too large

View File

@@ -3,6 +3,7 @@ import threading
import logging
import json
from pathlib import Path
from typing import Any, List, Dict
from routes.utils.watch.db import (
get_watched_playlists,
@@ -12,29 +13,30 @@ from routes.utils.watch.db import (
update_playlist_snapshot,
mark_tracks_as_not_present_in_spotify,
# Artist watch DB functions
init_artists_db,
get_watched_artists,
get_watched_artist,
get_artist_album_ids_from_db,
add_or_update_album_for_artist, # Renamed from add_album_to_artist_db
update_artist_metadata_after_check # Renamed from update_artist_metadata
update_artist_metadata_after_check, # Renamed from update_artist_metadata
)
from routes.utils.get_info import get_spotify_info # To fetch playlist, track, artist, and album details
from routes.utils.get_info import (
get_spotify_info,
) # To fetch playlist, track, artist, and album details
from routes.utils.celery_queue_manager import download_queue_manager
logger = logging.getLogger(__name__)
CONFIG_FILE_PATH = Path('./data/config/watch.json')
CONFIG_FILE_PATH = Path("./data/config/watch.json")
STOP_EVENT = threading.Event()
DEFAULT_WATCH_CONFIG = {
"enabled": False,
"watchPollIntervalSeconds": 3600,
"max_tracks_per_run": 50, # For playlists
"watchedArtistAlbumGroup": ["album", "single"], # Default for artists
"max_tracks_per_run": 50, # For playlists
"watchedArtistAlbumGroup": ["album", "single"], # Default for artists
"delay_between_playlists_seconds": 2,
"delay_between_artists_seconds": 5 # Added for artists
"delay_between_artists_seconds": 5, # Added for artists
}
def get_watch_config():
"""Loads the watch configuration from watch.json.
Creates the file with defaults if it doesn't exist.
@@ -45,43 +47,56 @@ def get_watch_config():
CONFIG_FILE_PATH.parent.mkdir(parents=True, exist_ok=True)
if not CONFIG_FILE_PATH.exists():
logger.info(f"{CONFIG_FILE_PATH} not found. Creating with default watch config.")
with open(CONFIG_FILE_PATH, 'w') as f:
logger.info(
f"{CONFIG_FILE_PATH} not found. Creating with default watch config."
)
with open(CONFIG_FILE_PATH, "w") as f:
json.dump(DEFAULT_WATCH_CONFIG, f, indent=2)
return DEFAULT_WATCH_CONFIG.copy()
with open(CONFIG_FILE_PATH, 'r') as f:
with open(CONFIG_FILE_PATH, "r") as f:
config = json.load(f)
updated = False
for key, value in DEFAULT_WATCH_CONFIG.items():
if key not in config:
config[key] = value
updated = True
if updated:
logger.info(f"Watch configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults.")
with open(CONFIG_FILE_PATH, 'w') as f:
logger.info(
f"Watch configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults."
)
with open(CONFIG_FILE_PATH, "w") as f:
json.dump(config, f, indent=2)
return config
except Exception as e:
logger.error(f"Error loading or creating watch config at {CONFIG_FILE_PATH}: {e}", exc_info=True)
return DEFAULT_WATCH_CONFIG.copy() # Fallback
logger.error(
f"Error loading or creating watch config at {CONFIG_FILE_PATH}: {e}",
exc_info=True,
)
return DEFAULT_WATCH_CONFIG.copy() # Fallback
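As a usage sketch (assuming the defaults above), a caller reads the merged config and never has to worry about a missing or partial watch.json:
config = get_watch_config()  # creates ./data/config/watch.json with defaults if missing
interval = config.get("watchPollIntervalSeconds", 3600)
groups = [g.lower() for g in config.get("watchedArtistAlbumGroup", ["album", "single"])]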
def construct_spotify_url(item_id, item_type="track"):
return f"https://open.spotify.com/{item_type}/{item_id}"
def check_watched_playlists(specific_playlist_id: str = None):
"""Checks watched playlists for new tracks and queues downloads.
If specific_playlist_id is provided, only that playlist is checked.
"""
logger.info(f"Playlist Watch Manager: Starting check. Specific playlist: {specific_playlist_id or 'All'}")
logger.info(
f"Playlist Watch Manager: Starting check. Specific playlist: {specific_playlist_id or 'All'}"
)
config = get_watch_config()
if specific_playlist_id:
playlist_obj = get_watched_playlist(specific_playlist_id)
if not playlist_obj:
logger.error(f"Playlist Watch Manager: Playlist {specific_playlist_id} not found in watch database.")
logger.error(
f"Playlist Watch Manager: Playlist {specific_playlist_id} not found in watch database."
)
return
watched_playlists_to_check = [playlist_obj]
else:
@@ -92,25 +107,36 @@ def check_watched_playlists(specific_playlist_id: str = None):
return
for playlist_in_db in watched_playlists_to_check:
playlist_spotify_id = playlist_in_db['spotify_id']
playlist_name = playlist_in_db['name']
logger.info(f"Playlist Watch Manager: Checking playlist '{playlist_name}' ({playlist_spotify_id})...")
playlist_spotify_id = playlist_in_db["spotify_id"]
playlist_name = playlist_in_db["name"]
logger.info(
f"Playlist Watch Manager: Checking playlist '{playlist_name}' ({playlist_spotify_id})..."
)
try:
# For playlists, we usually fetch all tracks in one go (Spotify API limits permitting)
current_playlist_data_from_api = get_spotify_info(playlist_spotify_id, "playlist")
if not current_playlist_data_from_api or 'tracks' not in current_playlist_data_from_api:
logger.error(f"Playlist Watch Manager: Failed to fetch data or tracks from Spotify for playlist {playlist_spotify_id}.")
current_playlist_data_from_api = get_spotify_info(
playlist_spotify_id, "playlist"
)
if (
not current_playlist_data_from_api
or "tracks" not in current_playlist_data_from_api
):
logger.error(
f"Playlist Watch Manager: Failed to fetch data or tracks from Spotify for playlist {playlist_spotify_id}."
)
continue
api_snapshot_id = current_playlist_data_from_api.get('snapshot_id')
api_total_tracks = current_playlist_data_from_api.get('tracks', {}).get('total', 0)
api_snapshot_id = current_playlist_data_from_api.get("snapshot_id")
api_total_tracks = current_playlist_data_from_api.get("tracks", {}).get(
"total", 0
)
# Paginate through playlist tracks if necessary
all_api_track_items = []
offset = 0
limit = 50  # Spotify API limit for playlist items
while True:
# Re-fetch with pagination if tracks.next is present, or on first call.
# get_spotify_info for playlist should ideally handle pagination internally if asked for all tracks.
@@ -120,103 +146,152 @@ def check_watched_playlists(specific_playlist_id: str = None):
# Modifying get_spotify_info is outside current scope, so we'll assume it returns ALL items for a playlist.
# If it doesn't, this part would need adjustment for robust pagination.
# For now, we use the items from the initial fetch.
paginated_playlist_data = get_spotify_info(playlist_spotify_id, "playlist", offset=offset, limit=limit)
if not paginated_playlist_data or 'tracks' not in paginated_playlist_data:
break
page_items = paginated_playlist_data.get('tracks', {}).get('items', [])
paginated_playlist_data = get_spotify_info(
playlist_spotify_id, "playlist", offset=offset, limit=limit
)
if (
not paginated_playlist_data
or "tracks" not in paginated_playlist_data
):
break
page_items = paginated_playlist_data.get("tracks", {}).get("items", [])
if not page_items:
break
all_api_track_items.extend(page_items)
if paginated_playlist_data.get('tracks', {}).get('next'):
if paginated_playlist_data.get("tracks", {}).get("next"):
offset += limit
else:
break
current_api_track_ids = set()
api_track_id_to_item_map = {}
for item in all_api_track_items: # Use all_api_track_items
track = item.get('track')
if track and track.get('id') and not track.get('is_local'):
track_id = track['id']
for item in all_api_track_items: # Use all_api_track_items
track = item.get("track")
if track and track.get("id") and not track.get("is_local"):
track_id = track["id"]
current_api_track_ids.add(track_id)
api_track_id_to_item_map[track_id] = item
db_track_ids = get_playlist_track_ids_from_db(playlist_spotify_id)
new_track_ids_for_download = current_api_track_ids - db_track_ids
queued_for_download_count = 0
if new_track_ids_for_download:
logger.info(f"Playlist Watch Manager: Found {len(new_track_ids_for_download)} new tracks for playlist '{playlist_name}' to download.")
logger.info(
f"Playlist Watch Manager: Found {len(new_track_ids_for_download)} new tracks for playlist '{playlist_name}' to download."
)
for track_id in new_track_ids_for_download:
api_item = api_track_id_to_item_map.get(track_id)
if not api_item or not api_item.get("track"):
logger.warning(f"Playlist Watch Manager: Missing track details in API map for new track_id {track_id} in playlist {playlist_spotify_id}. Cannot queue.")
logger.warning(
f"Playlist Watch Manager: Missing track details in API map for new track_id {track_id} in playlist {playlist_spotify_id}. Cannot queue."
)
continue
track_to_queue = api_item["track"]
task_payload = {
"download_type": "track",
"url": construct_spotify_url(track_id, "track"),
"name": track_to_queue.get('name', 'Unknown Track'),
"artist": ", ".join([a['name'] for a in track_to_queue.get('artists', []) if a.get('name')]),
"name": track_to_queue.get("name", "Unknown Track"),
"artist": ", ".join(
[
a["name"]
for a in track_to_queue.get("artists", [])
if a.get("name")
]
),
"orig_request": {
"source": "playlist_watch",
"playlist_id": playlist_spotify_id,
"playlist_name": playlist_name,
"track_spotify_id": track_id,
"track_item_for_db": api_item # Pass full API item for DB update on completion
}
"track_item_for_db": api_item, # Pass full API item for DB update on completion
},
# "track_details_for_db" was old name, using track_item_for_db consistent with celery_tasks
}
try:
task_id_or_none = download_queue_manager.add_task(task_payload, from_watch_job=True)
if task_id_or_none: # Task was newly queued
logger.info(f"Playlist Watch Manager: Queued download task {task_id_or_none} for new track {track_id} ('{track_to_queue.get('name')}') from playlist '{playlist_name}'.")
task_id_or_none = download_queue_manager.add_task(
task_payload, from_watch_job=True
)
if task_id_or_none: # Task was newly queued
logger.info(
f"Playlist Watch Manager: Queued download task {task_id_or_none} for new track {track_id} ('{track_to_queue.get('name')}') from playlist '{playlist_name}'."
)
queued_for_download_count += 1
# If task_id_or_none is None, it was a duplicate and not re-queued; the Celery queue manager handles the logging.
except Exception as e:
logger.error(f"Playlist Watch Manager: Failed to queue download for new track {track_id} from playlist '{playlist_name}': {e}", exc_info=True)
logger.info(f"Playlist Watch Manager: Attempted to queue {queued_for_download_count} new tracks for playlist '{playlist_name}'.")
logger.error(
f"Playlist Watch Manager: Failed to queue download for new track {track_id} from playlist '{playlist_name}': {e}",
exc_info=True,
)
logger.info(
f"Playlist Watch Manager: Attempted to queue {queued_for_download_count} new tracks for playlist '{playlist_name}'."
)
else:
logger.info(f"Playlist Watch Manager: No new tracks to download for playlist '{playlist_name}'.")
logger.info(
f"Playlist Watch Manager: No new tracks to download for playlist '{playlist_name}'."
)
# Update DB for tracks that are still present in API (e.g. update 'last_seen_in_spotify')
# add_tracks_to_playlist_db handles INSERT OR REPLACE, updating existing entries.
# We should pass all current API tracks to ensure their `last_seen_in_spotify` and `is_present_in_spotify` are updated.
if all_api_track_items: # If there are any tracks in the API for this playlist
logger.info(f"Playlist Watch Manager: Refreshing {len(all_api_track_items)} tracks from API in local DB for playlist '{playlist_name}'.")
add_tracks_to_playlist_db(playlist_spotify_id, all_api_track_items)
if (
all_api_track_items
): # If there are any tracks in the API for this playlist
logger.info(
f"Playlist Watch Manager: Refreshing {len(all_api_track_items)} tracks from API in local DB for playlist '{playlist_name}'."
)
add_tracks_to_playlist_db(playlist_spotify_id, all_api_track_items)
removed_db_ids = db_track_ids - current_api_track_ids
if removed_db_ids:
logger.info(f"Playlist Watch Manager: {len(removed_db_ids)} tracks removed from Spotify playlist '{playlist_name}'. Marking in DB.")
mark_tracks_as_not_present_in_spotify(playlist_spotify_id, list(removed_db_ids))
logger.info(
f"Playlist Watch Manager: {len(removed_db_ids)} tracks removed from Spotify playlist '{playlist_name}'. Marking in DB."
)
mark_tracks_as_not_present_in_spotify(
playlist_spotify_id, list(removed_db_ids)
)
update_playlist_snapshot(playlist_spotify_id, api_snapshot_id, api_total_tracks) # api_total_tracks from initial fetch
logger.info(f"Playlist Watch Manager: Finished checking playlist '{playlist_name}'. Snapshot ID updated. API Total Tracks: {api_total_tracks}.")
update_playlist_snapshot(
playlist_spotify_id, api_snapshot_id, api_total_tracks
) # api_total_tracks from initial fetch
logger.info(
f"Playlist Watch Manager: Finished checking playlist '{playlist_name}'. Snapshot ID updated. API Total Tracks: {api_total_tracks}."
)
except Exception as e:
logger.error(f"Playlist Watch Manager: Error processing playlist {playlist_spotify_id}: {e}", exc_info=True)
time.sleep(max(1, config.get("delay_between_playlists_seconds", 2)))
logger.error(
f"Playlist Watch Manager: Error processing playlist {playlist_spotify_id}: {e}",
exc_info=True,
)
time.sleep(max(1, config.get("delay_between_playlists_seconds", 2)))
logger.info("Playlist Watch Manager: Finished checking all watched playlists.")
def check_watched_artists(specific_artist_id: str = None):
"""Checks watched artists for new albums and queues downloads."""
logger.info(f"Artist Watch Manager: Starting check. Specific artist: {specific_artist_id or 'All'}")
logger.info(
f"Artist Watch Manager: Starting check. Specific artist: {specific_artist_id or 'All'}"
)
config = get_watch_config()
watched_album_groups = [g.lower() for g in config.get("watchedArtistAlbumGroup", ["album", "single"])]
logger.info(f"Artist Watch Manager: Watching for album groups: {watched_album_groups}")
watched_album_groups = [
g.lower() for g in config.get("watchedArtistAlbumGroup", ["album", "single"])
]
logger.info(
f"Artist Watch Manager: Watching for album groups: {watched_album_groups}"
)
if specific_artist_id:
artist_obj_in_db = get_watched_artist(specific_artist_id)
if not artist_obj_in_db:
logger.error(f"Artist Watch Manager: Artist {specific_artist_id} not found in watch database.")
logger.error(
f"Artist Watch Manager: Artist {specific_artist_id} not found in watch database."
)
return
artists_to_check = [artist_obj_in_db]
else:
@@ -227,200 +302,282 @@ def check_watched_artists(specific_artist_id: str = None):
return
for artist_in_db in artists_to_check:
artist_spotify_id = artist_in_db['spotify_id']
artist_name = artist_in_db['name']
logger.info(f"Artist Watch Manager: Checking artist '{artist_name}' ({artist_spotify_id})...")
artist_spotify_id = artist_in_db["spotify_id"]
artist_name = artist_in_db["name"]
logger.info(
f"Artist Watch Manager: Checking artist '{artist_name}' ({artist_spotify_id})..."
)
try:
# Spotify API for artist albums is paginated.
# We need to fetch all albums. get_spotify_info with type 'artist-albums' should handle this.
# Let's assume get_spotify_info(artist_id, 'artist-albums') returns a list of all album objects.
# Or we implement pagination here.
all_artist_albums_from_api = []
all_artist_albums_from_api: List[Dict[str, Any]] = []
offset = 0
limit = 50  # Spotify API limit for artist albums
while True:
# The 'artist-albums' type for get_spotify_info needs to support pagination params.
# And return a list of album objects.
logger.debug(f"Artist Watch Manager: Fetching albums for {artist_spotify_id}. Limit: {limit}, Offset: {offset}")
artist_albums_page = get_spotify_info(artist_spotify_id, "artist_discography", limit=limit, offset=offset)
logger.debug(
f"Artist Watch Manager: Fetching albums for {artist_spotify_id}. Limit: {limit}, Offset: {offset}"
)
artist_albums_page = get_spotify_info(
artist_spotify_id, "artist_discography", limit=limit, offset=offset
)
if not artist_albums_page or not isinstance(artist_albums_page.get('items'), list):
logger.warning(f"Artist Watch Manager: No album items found or invalid format for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Response: {artist_albums_page}")
if not artist_albums_page or not isinstance(
artist_albums_page.get("items"), list
):
logger.warning(
f"Artist Watch Manager: No album items found or invalid format for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Response: {artist_albums_page}"
)
break
current_page_albums = artist_albums_page.get('items', [])
current_page_albums = artist_albums_page.get("items", [])
if not current_page_albums:
logger.info(f"Artist Watch Manager: No more albums on page for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Total fetched so far: {len(all_artist_albums_from_api)}.")
logger.info(
f"Artist Watch Manager: No more albums on page for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Total fetched so far: {len(all_artist_albums_from_api)}."
)
break
logger.debug(f"Artist Watch Manager: Fetched {len(current_page_albums)} albums on current page for artist '{artist_name}'.")
logger.debug(
f"Artist Watch Manager: Fetched {len(current_page_albums)} albums on current page for artist '{artist_name}'."
)
all_artist_albums_from_api.extend(current_page_albums)
# Correct pagination: Check if Spotify indicates a next page URL
# The `next` field in Spotify API responses is a URL to the next page or null.
if artist_albums_page.get('next'):
offset += limit # CORRECT: Increment offset by the limit used for the request
if artist_albums_page.get("next"):
offset += limit # CORRECT: Increment offset by the limit used for the request
else:
logger.info(f"Artist Watch Manager: No 'next' page URL for artist '{artist_name}'. Pagination complete. Total albums fetched: {len(all_artist_albums_from_api)}.")
logger.info(
f"Artist Watch Manager: No 'next' page URL for artist '{artist_name}'. Pagination complete. Total albums fetched: {len(all_artist_albums_from_api)}."
)
break
# total_albums_from_api = len(all_artist_albums_from_api)
# Use the 'total' field from the API response for a more accurate count of all available albums (matching current API filter if any)
api_reported_total_albums = artist_albums_page.get('total', 0) if 'artist_albums_page' in locals() and artist_albums_page else len(all_artist_albums_from_api)
logger.info(f"Artist Watch Manager: Fetched {len(all_artist_albums_from_api)} albums in total from API for artist '{artist_name}'. API reports total: {api_reported_total_albums}.")
api_reported_total_albums = (
artist_albums_page.get("total", 0)
if "artist_albums_page" in locals() and artist_albums_page
else len(all_artist_albums_from_api)
)
logger.info(
f"Artist Watch Manager: Fetched {len(all_artist_albums_from_api)} albums in total from API for artist '{artist_name}'. API reports total: {api_reported_total_albums}."
)
db_album_ids = get_artist_album_ids_from_db(artist_spotify_id)
logger.info(f"Artist Watch Manager: Found {len(db_album_ids)} albums in DB for artist '{artist_name}'. These will be skipped if re-encountered unless logic changes.")
logger.info(
f"Artist Watch Manager: Found {len(db_album_ids)} albums in DB for artist '{artist_name}'. These will be skipped if re-encountered unless logic changes."
)
queued_for_download_count = 0
processed_album_ids_in_run = set()  # To avoid processing duplicate album_ids if API returns them across pages (should not happen with correct pagination)
for album_data in all_artist_albums_from_api:
album_id = album_data.get('id')
album_name = album_data.get('name', 'Unknown Album')
album_group = album_data.get('album_group', 'N/A').lower()
album_type = album_data.get('album_type', 'N/A').lower()
album_id = album_data.get("id")
album_name = album_data.get("name", "Unknown Album")
album_group = album_data.get("album_group", "N/A").lower()
album_type = album_data.get("album_type", "N/A").lower()
if not album_id:
logger.warning(f"Artist Watch Manager: Skipping album without ID for artist '{artist_name}'. Album data: {album_data}")
logger.warning(
f"Artist Watch Manager: Skipping album without ID for artist '{artist_name}'. Album data: {album_data}"
)
continue
if album_id in processed_album_ids_in_run:
logger.debug(f"Artist Watch Manager: Album '{album_name}' ({album_id}) already processed in this run. Skipping.")
logger.debug(
f"Artist Watch Manager: Album '{album_name}' ({album_id}) already processed in this run. Skipping."
)
continue
processed_album_ids_in_run.add(album_id)
# Filter based on watchedArtistAlbumGroup
# The album_group field is generally preferred for this type of categorization as per Spotify docs.
is_matching_group = album_group in watched_album_groups
logger.debug(f"Artist '{artist_name}', Album '{album_name}' ({album_id}): album_group='{album_group}', album_type='{album_type}'. Watched groups: {watched_album_groups}. Match: {is_matching_group}.")
logger.debug(
f"Artist '{artist_name}', Album '{album_name}' ({album_id}): album_group='{album_group}', album_type='{album_type}'. Watched groups: {watched_album_groups}. Match: {is_matching_group}."
)
if not is_matching_group:
logger.debug(f"Artist Watch Manager: Skipping album '{album_name}' ({album_id}) by '{artist_name}' - group '{album_group}' not in watched list: {watched_album_groups}.")
logger.debug(
f"Artist Watch Manager: Skipping album '{album_name}' ({album_id}) by '{artist_name}' - group '{album_group}' not in watched list: {watched_album_groups}."
)
continue
logger.info(f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' (group: {album_group}) IS a matching group.")
logger.info(
f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' (group: {album_group}) IS a matching group."
)
if album_id not in db_album_ids:
logger.info(f"Artist Watch Manager: Found NEW matching album '{album_name}' ({album_id}) by '{artist_name}'. Queuing for download.")
album_artists_list = album_data.get('artists', [])
album_main_artist_name = album_artists_list[0].get('name', 'Unknown Artist') if album_artists_list else 'Unknown Artist'
logger.info(
f"Artist Watch Manager: Found NEW matching album '{album_name}' ({album_id}) by '{artist_name}'. Queuing for download."
)
album_artists_list = album_data.get("artists", [])
album_main_artist_name = (
album_artists_list[0].get("name", "Unknown Artist")
if album_artists_list
else "Unknown Artist"
)
task_payload = {
"download_type": "album", # Or "track" if downloading individual tracks of album later
"download_type": "album", # Or "track" if downloading individual tracks of album later
"url": construct_spotify_url(album_id, "album"),
"name": album_name,
"artist": album_main_artist_name, # Primary artist of the album
"artist": album_main_artist_name, # Primary artist of the album
"orig_request": {
"source": "artist_watch",
"artist_spotify_id": artist_spotify_id, # Watched artist
"artist_spotify_id": artist_spotify_id, # Watched artist
"artist_name": artist_name,
"album_spotify_id": album_id,
"album_data_for_db": album_data # Pass full API album object for DB update on completion/queuing
}
"album_data_for_db": album_data, # Pass full API album object for DB update on completion/queuing
},
}
try:
# Ordering: we could record the album in the DB (via add_or_update_album_for_artist,
# with a task_id) before queuing, or queue first and record afterwards. Since
# celery_queue_manager.add_task returns None for duplicates, we call add_task first
# and only touch the DB once a new task_id comes back.
task_id_or_none = download_queue_manager.add_task(task_payload, from_watch_job=True)
if task_id_or_none: # Task was newly queued
task_id_or_none = download_queue_manager.add_task(
task_payload, from_watch_job=True
)
if task_id_or_none: # Task was newly queued
# REMOVED: add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=task_id_or_none, is_download_complete=False)
# The album will be added/updated in the DB by celery_tasks.py upon successful download completion.
logger.info(f"Artist Watch Manager: Queued download task {task_id_or_none} for new album '{album_name}' from artist '{artist_name}'. DB entry will be created/updated on success.")
logger.info(
f"Artist Watch Manager: Queued download task {task_id_or_none} for new album '{album_name}' from artist '{artist_name}'. DB entry will be created/updated on success."
)
queued_for_download_count += 1
# If task_id_or_none is None, it was a duplicate. Celery manager handles logging.
except Exception as e:
logger.error(f"Artist Watch Manager: Failed to queue download for new album {album_id} ('{album_name}') from artist '{artist_name}': {e}", exc_info=True)
logger.error(
f"Artist Watch Manager: Failed to queue download for new album {album_id} ('{album_name}') from artist '{artist_name}': {e}",
exc_info=True,
)
else:
logger.info(f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' already known in DB (ID found in db_album_ids). Skipping queue.")
logger.info(
f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' already known in DB (ID found in db_album_ids). Skipping queue."
)
# Optionally, update its entry (e.g. last_seen, or if details changed), but for now, we only queue new ones.
# add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=None, is_download_complete=False) # would update added_to_db_at
logger.info(f"Artist Watch Manager: For artist '{artist_name}', processed {len(all_artist_albums_from_api)} API albums, attempted to queue {queued_for_download_count} new albums.")
update_artist_metadata_after_check(artist_spotify_id, api_reported_total_albums)
logger.info(f"Artist Watch Manager: Finished checking artist '{artist_name}'. DB metadata updated. API reported total albums (for API filter): {api_reported_total_albums}.")
logger.info(
f"Artist Watch Manager: For artist '{artist_name}', processed {len(all_artist_albums_from_api)} API albums, attempted to queue {queued_for_download_count} new albums."
)
update_artist_metadata_after_check(
artist_spotify_id, api_reported_total_albums
)
logger.info(
f"Artist Watch Manager: Finished checking artist '{artist_name}'. DB metadata updated. API reported total albums (for API filter): {api_reported_total_albums}."
)
except Exception as e:
logger.error(f"Artist Watch Manager: Error processing artist {artist_spotify_id} ('{artist_name}'): {e}", exc_info=True)
logger.error(
f"Artist Watch Manager: Error processing artist {artist_spotify_id} ('{artist_name}'): {e}",
exc_info=True,
)
time.sleep(max(1, config.get("delay_between_artists_seconds", 5)))
logger.info("Artist Watch Manager: Finished checking all watched artists.")
def playlist_watch_scheduler():
"""Periodically calls check_watched_playlists and check_watched_artists."""
logger.info("Watch Scheduler: Thread started.")
config = get_watch_config()  # Initial load; the loop below reloads each iteration so config changes take effect dynamically.
while not STOP_EVENT.is_set():
current_config = get_watch_config()  # Get latest config for this run
interval = current_config.get("watchPollIntervalSeconds", 3600)
watch_enabled = current_config.get("enabled", False)  # Get enabled status
if not watch_enabled:
logger.info("Watch Scheduler: Watch feature is disabled in config. Skipping checks.")
STOP_EVENT.wait(interval) # Still respect poll interval for checking config again
continue # Skip to next iteration
logger.info(
"Watch Scheduler: Watch feature is disabled in config. Skipping checks."
)
STOP_EVENT.wait(
interval
) # Still respect poll interval for checking config again
continue # Skip to next iteration
try:
logger.info("Watch Scheduler: Starting playlist check run.")
check_watched_playlists()
logger.info("Watch Scheduler: Playlist check run completed.")
except Exception as e:
logger.error(f"Watch Scheduler: Unhandled exception during check_watched_playlists: {e}", exc_info=True)
logger.error(
f"Watch Scheduler: Unhandled exception during check_watched_playlists: {e}",
exc_info=True,
)
# Add a small delay between playlist and artist checks if desired
# time.sleep(current_config.get("delay_between_check_types_seconds", 10))
if STOP_EVENT.is_set(): break # Check stop event again before starting artist check
if STOP_EVENT.is_set():
break # Check stop event again before starting artist check
try:
logger.info("Watch Scheduler: Starting artist check run.")
check_watched_artists()
logger.info("Watch Scheduler: Artist check run completed.")
except Exception as e:
logger.error(f"Watch Scheduler: Unhandled exception during check_watched_artists: {e}", exc_info=True)
logger.info(f"Watch Scheduler: All checks complete. Next run in {interval} seconds.")
STOP_EVENT.wait(interval)
logger.error(
f"Watch Scheduler: Unhandled exception during check_watched_artists: {e}",
exc_info=True,
)
logger.info(
f"Watch Scheduler: All checks complete. Next run in {interval} seconds."
)
STOP_EVENT.wait(interval)
logger.info("Watch Scheduler: Thread stopped.")
# --- Global thread for the scheduler ---
_watch_scheduler_thread = None  # Renamed from _playlist_watch_thread
def start_watch_manager():  # Renamed from start_playlist_watch_manager
global _watch_scheduler_thread
if _watch_scheduler_thread is None or not _watch_scheduler_thread.is_alive():
STOP_EVENT.clear()
# Initialize DBs on start
from routes.utils.watch.db import init_playlists_db, init_artists_db # Updated import
init_playlists_db() # For playlists
init_artists_db() # For artists
_watch_scheduler_thread = threading.Thread(target=playlist_watch_scheduler, daemon=True)
from routes.utils.watch.db import (
init_playlists_db,
init_artists_db,
) # Updated import
init_playlists_db() # For playlists
init_artists_db() # For artists
_watch_scheduler_thread = threading.Thread(
target=playlist_watch_scheduler, daemon=True
)
_watch_scheduler_thread.start()
logger.info("Watch Manager: Background scheduler started (includes playlists and artists).")
logger.info(
"Watch Manager: Background scheduler started (includes playlists and artists)."
)
else:
logger.info("Watch Manager: Background scheduler already running.")
def stop_watch_manager():  # Renamed from stop_playlist_watch_manager
global _watch_scheduler_thread
if _watch_scheduler_thread and _watch_scheduler_thread.is_alive():
logger.info("Watch Manager: Stopping background scheduler...")
STOP_EVENT.set()
_watch_scheduler_thread.join(timeout=10)
if _watch_scheduler_thread.is_alive():
logger.warning("Watch Manager: Scheduler thread did not stop in time.")
else:
@@ -429,5 +586,6 @@ def stop_watch_manager(): # Renamed from stop_playlist_watch_manager
else:
logger.info("Watch Manager: Background scheduler not running.")
# If this module is imported, and you want to auto-start the manager, you could call start_watch_manager() here.
# However, it's usually better to explicitly start it from the main application/__init__.py.
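In line with the comment above, explicit wiring from the application entry point might look like this sketch; create_app and the import path are illustrative, not taken from the codebase:
import atexit

# Hypothetical import path for this module.
from routes.utils.watch.manager import start_watch_manager, stop_watch_manager

def create_app():
    app = ...  # build the web app as usual
    start_watch_manager()                # start the background watch scheduler
    atexit.register(stop_watch_manager)  # ensure the thread is stopped on shutdown
    return app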