Allegedly fixed #228 and #226. Implemented #204

This commit is contained in:
Xoconoch
2025-08-10 20:30:25 -06:00
parent 646d701816
commit 7c01f7806e
17 changed files with 1405 additions and 795 deletions

View File

@@ -1,7 +1,7 @@
 fastapi==0.116.1
 uvicorn[standard]==0.35.0
 celery==5.5.3
-deezspot-spotizerr==2.2.4
+deezspot-spotizerr==2.2.6
 httpx==0.28.1
 bcrypt==4.2.1
 PyJWT==2.10.1

View File

@@ -27,13 +27,17 @@ def download_album(
     progress_callback=None,
     convert_to=None,
     bitrate=None,
+    artist_separator="; ",
+    recursive_quality=True,
     _is_celery_task_execution=False,  # Added to skip duplicate check from Celery task
 ):
     if not _is_celery_task_execution:
-        existing_task = get_existing_task_id(url)  # Check for duplicates only if not called by Celery task
+        existing_task = get_existing_task_id(
+            url
+        )  # Check for duplicates only if not called by Celery task
         if existing_task:
             raise DuplicateDownloadError(
-                f"Download for this URL is already in progress.",
+                "Download for this URL is already in progress.",
                 existing_task=existing_task,
             )
     try:
@@ -96,7 +100,7 @@ def download_album(
                 link_album=url,  # Spotify URL
                 output_dir="./downloads",
                 quality_download=quality,  # Deezer quality
-                recursive_quality=True,
+                recursive_quality=recursive_quality,
                 recursive_download=False,
                 not_interface=False,
                 make_zip=False,
@@ -109,6 +113,7 @@ def download_album(
                 max_retries=max_retries,
                 convert_to=convert_to,
                 bitrate=bitrate,
+                artist_separator=artist_separator,
             )
             print(
                 f"DEBUG: album.py - Album download via Deezer (account: {fallback}) successful for Spotify URL."
@@ -151,7 +156,7 @@ def download_album(
                 link_album=url,  # Spotify URL
                 output_dir="./downloads",
                 quality_download=fall_quality,  # Spotify quality
-                recursive_quality=True,
+                recursive_quality=recursive_quality,
                 recursive_download=False,
                 not_interface=False,
                 make_zip=False,
@@ -165,6 +170,7 @@ def download_album(
                 max_retries=max_retries,
                 convert_to=convert_to,
                 bitrate=bitrate,
+                artist_separator=artist_separator,
             )
             print(
                 f"DEBUG: album.py - Spotify direct download (account: {main} for blob) successful."
@@ -205,7 +211,7 @@ def download_album(
                 link_album=url,
                 output_dir="./downloads",
                 quality_download=quality,
-                recursive_quality=True,
+                recursive_quality=recursive_quality,
                 recursive_download=False,
                 not_interface=False,
                 make_zip=False,
@@ -219,6 +225,7 @@ def download_album(
                 max_retries=max_retries,
                 convert_to=convert_to,
                 bitrate=bitrate,
+                artist_separator=artist_separator,
             )
             print(
                 f"DEBUG: album.py - Direct Spotify download (account: {main} for blob) successful."
@@ -246,7 +253,7 @@ def download_album(
                 link_album=url,
                 output_dir="./downloads",
                 quality_download=quality,
-                recursive_quality=True,
+                recursive_quality=recursive_quality,
                 recursive_download=False,
                 make_zip=False,
                 custom_dir_format=custom_dir_format,
@@ -258,6 +265,7 @@ def download_album(
                 max_retries=max_retries,
                 convert_to=convert_to,
                 bitrate=bitrate,
+                artist_separator=artist_separator,
             )
             print(
                 f"DEBUG: album.py - Direct Deezer download (account: {main}) successful."

View File

@@ -44,6 +44,7 @@ DEFAULT_MAIN_CONFIG = {
     "retry_delay_increase": 5,
     "convertTo": None,
     "bitrate": None,
+    "artist_separator": "; ",
 }
@@ -123,12 +124,12 @@ task_default_routing_key = "downloads"
 # Task routing - ensure SSE and utility tasks go to utility_tasks queue
 task_routes = {
-    'routes.utils.celery_tasks.trigger_sse_update_task': {'queue': 'utility_tasks'},
-    'routes.utils.celery_tasks.cleanup_stale_errors': {'queue': 'utility_tasks'},
-    'routes.utils.celery_tasks.delayed_delete_task_data': {'queue': 'utility_tasks'},
-    'routes.utils.celery_tasks.download_track': {'queue': 'downloads'},
-    'routes.utils.celery_tasks.download_album': {'queue': 'downloads'},
-    'routes.utils.celery_tasks.download_playlist': {'queue': 'downloads'},
+    "routes.utils.celery_tasks.trigger_sse_update_task": {"queue": "utility_tasks"},
+    "routes.utils.celery_tasks.cleanup_stale_errors": {"queue": "utility_tasks"},
+    "routes.utils.celery_tasks.delayed_delete_task_data": {"queue": "utility_tasks"},
+    "routes.utils.celery_tasks.download_track": {"queue": "downloads"},
+    "routes.utils.celery_tasks.download_album": {"queue": "downloads"},
+    "routes.utils.celery_tasks.download_playlist": {"queue": "downloads"},
 }
 # Celery task settings
@@ -193,8 +194,8 @@ worker_disable_rate_limits = False
 # Celery Beat schedule
 beat_schedule = {
-    'cleanup-old-tasks': {
-        'task': 'routes.utils.celery_tasks.cleanup_old_tasks',
-        'schedule': 3600.0,  # Run every hour
+    "cleanup-old-tasks": {
+        "task": "routes.utils.celery_tasks.cleanup_old_tasks",
+        "schedule": 3600.0,  # Run every hour
     },
 }
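
Review note: a quick sketch of what the new default means in practice, assuming the usual dict-merge pattern where user config overrides DEFAULT_MAIN_CONFIG (the merge helper here is hypothetical, and the dict is abridged):

    DEFAULT_MAIN_CONFIG = {"convertTo": None, "bitrate": None, "artist_separator": "; "}  # abridged

    def effective_config(user_config: dict) -> dict:
        # Defaults first, so user-supplied values win
        return {**DEFAULT_MAIN_CONFIG, **user_config}

    assert effective_config({})["artist_separator"] == "; "
    assert effective_config({"artist_separator": " / "})["artist_separator"] == " / "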

View File

@@ -60,6 +60,8 @@ def get_config_params():
             "retry_delay_increase": config.get("retry_delay_increase", 5),
             "convertTo": config.get("convertTo", None),
             "bitrate": config.get("bitrate", None),
+            "artist_separator": config.get("artist_separator", "; "),
+            "recursive_quality": config.get("recursive_quality", False),
         }
     except Exception as e:
         logger.error(f"Error reading config for parameters: {e}")
@@ -80,6 +82,8 @@ def get_config_params():
             "retry_delay_increase": 5,
             "convertTo": None,  # Default for conversion
             "bitrate": None,  # Default for bitrate
+            "artist_separator": "; ",
+            "recursive_quality": False,
         }
@@ -95,7 +99,9 @@ def get_existing_task_id(url, download_type=None):
     Returns:
         str | None: The task ID of the existing active task, or None if no active duplicate is found.
     """
-    logger.debug(f"GET_EXISTING_TASK_ID: Checking for URL='{url}', type='{download_type}'")
+    logger.debug(
+        f"GET_EXISTING_TASK_ID: Checking for URL='{url}', type='{download_type}'"
+    )
     if not url:
         logger.debug("GET_EXISTING_TASK_ID: No URL provided, returning None.")
         return None
@@ -119,64 +125,95 @@ def get_existing_task_id(url, download_type=None):
     }
     logger.debug(f"GET_EXISTING_TASK_ID: Terminal states defined as: {TERMINAL_STATES}")
-    all_existing_tasks_summary = get_all_tasks()  # This function already filters by default based on its own TERMINAL_STATES
-    logger.debug(f"GET_EXISTING_TASK_ID: Found {len(all_existing_tasks_summary)} tasks from get_all_tasks(). Iterating...")
+    all_existing_tasks_summary = (
+        get_all_tasks()
+    )  # This function already filters by default based on its own TERMINAL_STATES
+    logger.debug(
+        f"GET_EXISTING_TASK_ID: Found {len(all_existing_tasks_summary)} tasks from get_all_tasks(). Iterating..."
+    )
     for task_summary in all_existing_tasks_summary:
         existing_task_id = task_summary.get("task_id")
         if not existing_task_id:
             logger.debug("GET_EXISTING_TASK_ID: Skipping summary with no task_id.")
             continue
-        logger.debug(f"GET_EXISTING_TASK_ID: Processing existing task_id='{existing_task_id}' from summary.")
+        logger.debug(
+            f"GET_EXISTING_TASK_ID: Processing existing task_id='{existing_task_id}' from summary."
+        )
         # First, check the status of the task directly from its latest status record.
         # get_all_tasks() might have its own view of terminal, but we re-check here for absolute certainty.
         existing_last_status_obj = get_last_task_status(existing_task_id)
         if not existing_last_status_obj:
-            logger.debug(f"GET_EXISTING_TASK_ID: No last status object for task_id='{existing_task_id}'. Skipping.")
+            logger.debug(
+                f"GET_EXISTING_TASK_ID: No last status object for task_id='{existing_task_id}'. Skipping."
+            )
             continue
         # Extract status from standard structure (status_info.status) or fallback to top-level status
         existing_status = None
-        if "status_info" in existing_last_status_obj and existing_last_status_obj["status_info"]:
+        if (
+            "status_info" in existing_last_status_obj
+            and existing_last_status_obj["status_info"]
+        ):
            existing_status = existing_last_status_obj["status_info"].get("status")
         if not existing_status:
             existing_status = existing_last_status_obj.get("status")
-        logger.debug(f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}', last_status_obj='{existing_last_status_obj}', extracted status='{existing_status}'.")
+        logger.debug(
+            f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}', last_status_obj='{existing_last_status_obj}', extracted status='{existing_status}'."
+        )
         # If the task is in a terminal state, ignore it and move to the next one.
         if existing_status in TERMINAL_STATES:
-            logger.debug(f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}' has terminal status='{existing_status}'. Skipping.")
+            logger.debug(
+                f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}' has terminal status='{existing_status}'. Skipping."
+            )
            continue
-        logger.debug(f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}' has ACTIVE status='{existing_status}'. Proceeding to check URL/type.")
+        logger.debug(
+            f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}' has ACTIVE status='{existing_status}'. Proceeding to check URL/type."
+        )
         # If the task is active, then check if its URL and type match.
         existing_task_info = get_task_info(existing_task_id)
         if not existing_task_info:
-            logger.debug(f"GET_EXISTING_TASK_ID: No task info for active task_id='{existing_task_id}'. Skipping.")
+            logger.debug(
+                f"GET_EXISTING_TASK_ID: No task info for active task_id='{existing_task_id}'. Skipping."
+            )
            continue
         existing_url = existing_task_info.get("url")
-        logger.debug(f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}', info_url='{existing_url}'. Comparing with target_url='{url}'.")
+        logger.debug(
+            f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}', info_url='{existing_url}'. Comparing with target_url='{url}'."
+        )
         if existing_url != url:
-            logger.debug(f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}' URL mismatch. Skipping.")
+            logger.debug(
+                f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}' URL mismatch. Skipping."
+            )
            continue
         if download_type:
             existing_type = existing_task_info.get("download_type")
-            logger.debug(f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}', info_type='{existing_type}'. Comparing with target_type='{download_type}'.")
+            logger.debug(
+                f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}', info_type='{existing_type}'. Comparing with target_type='{download_type}'."
+            )
            if existing_type != download_type:
-                logger.debug(f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}' type mismatch. Skipping.")
+                logger.debug(
+                    f"GET_EXISTING_TASK_ID: Task_id='{existing_task_id}' type mismatch. Skipping."
+                )
                continue
         # Found an active task that matches the criteria.
-        logger.info(f"GET_EXISTING_TASK_ID: Found ACTIVE duplicate: task_id='{existing_task_id}' for URL='{url}', type='{download_type}'. Returning this ID.")
+        logger.info(
+            f"GET_EXISTING_TASK_ID: Found ACTIVE duplicate: task_id='{existing_task_id}' for URL='{url}', type='{download_type}'. Returning this ID."
+        )
         return existing_task_id
-    logger.debug(f"GET_EXISTING_TASK_ID: No active duplicate found for URL='{url}', type='{download_type}'. Returning None.")
+    logger.debug(
+        f"GET_EXISTING_TASK_ID: No active duplicate found for URL='{url}', type='{download_type}'. Returning None."
+    )
     return None
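
Review note: callers outside Celery are expected to consult this function before enqueueing, as album.py does above. A hedged sketch of that pattern (the surrounding handler and the DuplicateDownloadError import path are assumptions):

    def enqueue_if_new(url: str, download_type: str):
        existing = get_existing_task_id(url, download_type)
        if existing:
            # An active task already covers this URL; surface its ID instead of re-queuing
            raise DuplicateDownloadError(
                "Download for this URL is already in progress.",
                existing_task=existing,
            )
        # ...otherwise enqueue a new Celery task here...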
@@ -255,11 +292,16 @@ class CeleryDownloadQueueManager:
                 existing_url = existing_task_info.get("url")
                 existing_type = existing_task_info.get("download_type")
                 # Extract status from standard structure (status_info.status) or fallback to top-level status
                 existing_status = None
-                if "status_info" in existing_last_status_obj and existing_last_status_obj["status_info"]:
-                    existing_status = existing_last_status_obj["status_info"].get("status")
+                if (
+                    "status_info" in existing_last_status_obj
+                    and existing_last_status_obj["status_info"]
+                ):
+                    existing_status = existing_last_status_obj["status_info"].get(
+                        "status"
+                    )
                 if not existing_status:
                     existing_status = existing_last_status_obj.get("status")
@@ -350,6 +392,13 @@ class CeleryDownloadQueueManager:
                 "bitrate": original_request.get(
                     "bitrate", config_params.get("bitrate")
                 ),
+                "artist_separator": original_request.get(
+                    "artist_separator", config_params.get("artist_separator", "; ")
+                ),
+                "recursive_quality": self._parse_bool_param(
+                    original_request.get("recursive_quality"),
+                    config_params.get("recursive_quality", False),
+                ),
                 "retry_count": 0,
                 "original_request": original_request,
                 "created_at": time.time(),

View File

@@ -2,7 +2,6 @@ import time
 import json
 import logging
 import traceback
-import asyncio
 from celery import Celery, Task, states
 from celery.signals import (
     task_prerun,
@@ -35,6 +34,7 @@ from routes.utils.history_manager import history_manager
 # Create Redis connection for storing task data that's not part of the Celery result backend
 import redis

 # --- Helpers to build partial summaries from task logs ---
 def _read_task_log_json_lines(task_id: str) -> list:
     log_file_path = Path("./logs/tasks") / f"{task_id}.log"
@@ -69,7 +69,10 @@ def _extract_parent_initial_tracks(log_lines: list, parent_type: str) -> dict:
         if album and isinstance(album, dict) and album.get("tracks"):
             for t in album.get("tracks", []):
                 ids = (t or {}).get("ids", {}) or {}
-                key = ids.get("spotify") or f"{(t or {}).get('track_number', 0)}:{(t or {}).get('title', '')}"
+                key = (
+                    ids.get("spotify")
+                    or f"{(t or {}).get('track_number', 0)}:{(t or {}).get('title', '')}"
+                )
                 track_map[key] = t
             break
     elif parent_type == "playlist":
@@ -79,13 +82,18 @@ def _extract_parent_initial_tracks(log_lines: list, parent_type: str) -> dict:
             for t in playlist.get("tracks", []):
                 ids = (t or {}).get("ids", {}) or {}
                 # TrackPlaylistObject uses position
-                key = ids.get("spotify") or f"{(t or {}).get('position', 0)}:{(t or {}).get('title', '')}"
+                key = (
+                    ids.get("spotify")
+                    or f"{(t or {}).get('position', 0)}:{(t or {}).get('title', '')}"
+                )
                 track_map[key] = t
             break
     return track_map

-def _extract_completed_and_skipped_from_logs(log_lines: list) -> tuple[set, set, dict, dict]:
+def _extract_completed_and_skipped_from_logs(
+    log_lines: list,
+) -> tuple[set, set, dict, dict]:
     """
     Returns (completed_keys, skipped_keys, completed_objects_by_key, skipped_objects_by_key)
     Keys prefer ids.spotify, falling back to index+title scheme consistent with initial map.
@@ -102,7 +110,9 @@ def _extract_completed_and_skipped_from_logs(
         status = status_info.get("status")
         ids = (track or {}).get("ids", {}) or {}
         # Fallback keys try track_number:title and position:title
-        fallback_key = f"{(track or {}).get('track_number', 0)}:{(track or {}).get('title', '')}"
+        fallback_key = (
+            f"{(track or {}).get('track_number', 0)}:{(track or {}).get('title', '')}"
+        )
         key = ids.get("spotify") or fallback_key
         if status == "done":
             completed_keys.add(key)
@@ -128,11 +138,13 @@ def _to_track_object_from_initial(initial_track: dict, parent_type: str) -> dict:
     artists_conv = []
     for a in artists_src:
         if isinstance(a, dict):
-            artists_conv.append({
-                "type": "artistTrack",
-                "name": a.get("name", ""),
-                "ids": a.get("ids", {}) or {},
-            })
+            artists_conv.append(
+                {
+                    "type": "artistTrack",
+                    "name": a.get("name", ""),
+                    "ids": a.get("ids", {}) or {},
+                }
+            )
     # Convert album to AlbumTrackObject-like shape
     album_src = initial_track.get("album", {}) or {}
@@ -177,16 +189,23 @@ def build_partial_summary_from_task_log(task_id: str, parent_type: str) -> dict:
     """
     log_lines = _read_task_log_json_lines(task_id)
     initial_tracks_map = _extract_parent_initial_tracks(log_lines, parent_type)
-    completed_keys, skipped_keys, completed_objs, skipped_objs = _extract_completed_and_skipped_from_logs(log_lines)
+    completed_keys, skipped_keys, completed_objs, skipped_objs = (
+        _extract_completed_and_skipped_from_logs(log_lines)
+    )
     # Determine failed as initial - completed - skipped
     initial_keys = set(initial_tracks_map.keys())
     failed_keys = initial_keys.difference(completed_keys.union(skipped_keys))
-    successful_tracks = [completed_objs[k] for k in completed_keys if k in completed_objs]
+    successful_tracks = [
+        completed_objs[k] for k in completed_keys if k in completed_objs
+    ]
     skipped_tracks = [skipped_objs[k] for k in skipped_keys if k in skipped_objs]
     failed_tracks = [
-        {"track": _to_track_object_from_initial(initial_tracks_map[k], parent_type), "reason": "cancelled"}
+        {
+            "track": _to_track_object_from_initial(initial_tracks_map[k], parent_type),
+            "reason": "cancelled",
+        }
         for k in failed_keys
         if k in initial_tracks_map
     ]
@@ -224,16 +243,15 @@ def trigger_sse_event(task_id: str, reason: str = "status_change"):
         trigger_sse_update_task.apply_async(
             args=[task_id, reason],
             queue="utility_tasks",
-            priority=9  # High priority for real-time updates
+            priority=9,  # High priority for real-time updates
         )
         # Only log at debug level to reduce verbosity
         logger.debug(f"SSE: Submitted SSE update task for {task_id} (reason: {reason})")
     except Exception as e:
-        logger.error(f"Error submitting SSE update task for task {task_id}: {e}", exc_info=True)
+        logger.error(
+            f"Error submitting SSE update task for task {task_id}: {e}", exc_info=True
+        )

 class ProgressState:
@@ -318,10 +336,10 @@ def store_task_status(task_id, status_data):
         redis_client.publish(
             update_channel, json.dumps({"task_id": task_id, "status_id": status_id})
         )
         # Trigger immediate SSE event for real-time frontend updates
         trigger_sse_event(task_id, "status_update")
     except Exception as e:
         logger.error(f"Error storing task status: {e}")
         traceback.print_exc()
@@ -421,7 +439,7 @@ def cancel_task(task_id):
                 "status": ProgressState.CANCELLED,
                 "error": "Task cancelled by user",
                 "timestamp": time.time(),
-            }
+            },
         },
     )
@@ -616,6 +634,7 @@ def retry_task(task_id):
         logger.error(f"Error retrying task {task_id}: {e}", exc_info=True)
         return {"status": "error", "error": str(e)}

 class ProgressTrackingTask(Task):
     """Base task class that tracks progress through callbacks"""
@@ -633,7 +652,7 @@ class ProgressTrackingTask(Task):
         task_id = self.request.id
         # Ensure ./logs/tasks directory exists
         logs_tasks_dir = Path("./logs/tasks")
         try:
             logs_tasks_dir.mkdir(parents=True, exist_ok=True)
         except Exception as e:
@@ -650,7 +669,7 @@ class ProgressTrackingTask(Task):
             log_entry = progress_data.copy()
             if "timestamp" not in log_entry:
                 log_entry["timestamp"] = time.time()
             print(json.dumps(log_entry), file=log_file)
         except Exception as e:
             logger.error(
                 f"Task {task_id}: Could not write to task log file {log_file_path}: {e}"
@@ -663,7 +682,7 @@ class ProgressTrackingTask(Task):
         status_info = progress_data.get("status_info", {})
         status = status_info.get("status", progress_data.get("status", "unknown"))
         task_info = get_task_info(task_id)
         logger.debug(f"Task {task_id}: Extracted status: '{status}' from callback")
         if logger.isEnabledFor(logging.DEBUG):
@@ -704,59 +723,80 @@ class ProgressTrackingTask(Task):
     def _handle_initializing(self, task_id, data, task_info):
         """Handle initializing status from deezspot"""
         logger.info(f"Task {task_id} initializing...")
         # Initializing object is now very basic, mainly for acknowledging the start.
         # More detailed info comes with 'progress' or 'downloading' states.
         data["status"] = ProgressState.INITIALIZING
         # Store initial history entry for download start
         try:
             # Check for album/playlist FIRST since their callbacks contain both parent and track info
             if "album" in data:
                 # Album download - create children table and store name in task info
                 logger.info(f"Task {task_id}: Creating album children table")
-                children_table = history_manager.store_album_history(data, task_id, "in_progress")
+                children_table = history_manager.store_album_history(
+                    data, task_id, "in_progress"
+                )
                 if children_table:
                     task_info["children_table"] = children_table
                     store_task_info(task_id, task_info)
-                    logger.info(f"Task {task_id}: Created and stored children table '{children_table}' in task info")
+                    logger.info(
+                        f"Task {task_id}: Created and stored children table '{children_table}' in task info"
+                    )
                 else:
-                    logger.error(f"Task {task_id}: Failed to create album children table")
+                    logger.error(
+                        f"Task {task_id}: Failed to create album children table"
+                    )
             elif "playlist" in data:
                 # Playlist download - create children table and store name in task info
                 logger.info(f"Task {task_id}: Creating playlist children table")
-                children_table = history_manager.store_playlist_history(data, task_id, "in_progress")
+                children_table = history_manager.store_playlist_history(
+                    data, task_id, "in_progress"
+                )
                 if children_table:
                     task_info["children_table"] = children_table
                     store_task_info(task_id, task_info)
-                    logger.info(f"Task {task_id}: Created and stored children table '{children_table}' in task info")
+                    logger.info(
+                        f"Task {task_id}: Created and stored children table '{children_table}' in task info"
+                    )
                 else:
-                    logger.error(f"Task {task_id}: Failed to create playlist children table")
+                    logger.error(
+                        f"Task {task_id}: Failed to create playlist children table"
+                    )
             elif "track" in data:
                 # Individual track download - check if it's part of an album/playlist
                 children_table = task_info.get("children_table")
                 if children_table:
                     # Track is part of album/playlist - don't store in main table during initialization
-                    logger.info(f"Task {task_id}: Skipping track initialization storage (part of album/playlist, children table: {children_table})")
+                    logger.info(
+                        f"Task {task_id}: Skipping track initialization storage (part of album/playlist, children table: {children_table})"
+                    )
                 else:
                     # Individual track download - store in main table
-                    logger.info(f"Task {task_id}: Storing individual track history (initializing)")
+                    logger.info(
+                        f"Task {task_id}: Storing individual track history (initializing)"
+                    )
                     history_manager.store_track_history(data, task_id, "in_progress")
         except Exception as e:
-            logger.error(f"Failed to store initial history for task {task_id}: {e}", exc_info=True)
+            logger.error(
+                f"Failed to store initial history for task {task_id}: {e}",
+                exc_info=True,
+            )

     def _handle_downloading(self, task_id, data, task_info):
         """Handle downloading status from deezspot"""
         track_obj = data.get("track", {})
         track_name = track_obj.get("title", "Unknown")
         artists = track_obj.get("artists", [])
         artist_name = artists[0].get("name", "") if artists else ""
         album_obj = track_obj.get("album", {})
         album_name = album_obj.get("title", "")
-        logger.info(f"Task {task_id}: Starting download for track '{track_name}' by {artist_name}")
+        logger.info(
+            f"Task {task_id}: Starting download for track '{track_name}' by {artist_name}"
+        )
         data["status"] = ProgressState.DOWNLOADING
         data["song"] = track_name
@@ -767,14 +807,14 @@ class ProgressTrackingTask(Task):
         """Handle progress status for albums/playlists from deezspot"""
         item = data.get("playlist") or data.get("album", {})
         track = data.get("track", {})
         item_name = item.get("title", "Unknown Item")
         total_tracks = item.get("total_tracks", 0)
         track_name = track.get("title", "Unknown Track")
         artists = track.get("artists", [])
         artist_name = artists[0].get("name", "") if artists else ""
         # The 'progress' field in the callback is the track number being processed
         current_track_num = data.get("progress", 0)
@@ -783,13 +823,17 @@ class ProgressTrackingTask(Task):
             task_info["completed_tracks"] = current_track_num - 1
             task_info["current_track_num"] = current_track_num
             store_task_info(task_id, task_info)
-            overall_progress = min(int(((current_track_num -1) / total_tracks) * 100), 100)
+            overall_progress = min(
+                int(((current_track_num - 1) / total_tracks) * 100), 100
+            )
             data["overall_progress"] = overall_progress
             data["parsed_current_track"] = current_track_num
             data["parsed_total_tracks"] = total_tracks
-        logger.info(f"Task {task_id}: Progress on '{item_name}': Processing track {current_track_num}/{total_tracks} - '{track_name}'")
+        logger.info(
+            f"Task {task_id}: Progress on '{item_name}': Processing track {current_track_num}/{total_tracks} - '{track_name}'"
+        )
         data["status"] = ProgressState.PROGRESS
         data["song"] = track_name
@@ -801,9 +845,11 @@ class ProgressTrackingTask(Task):
         track_obj = data.get("track", {})
         track_name = track_obj.get("title", "Unknown Track")
         percentage = data.get("percentage", 0)
-        logger.debug(f"Task {task_id}: Real-time progress for '{track_name}': {percentage}%")
+        logger.debug(
+            f"Task {task_id}: Real-time progress for '{track_name}': {percentage}%"
+        )
         data["song"] = track_name
         artist = data.get("artist", "Unknown")
@@ -838,28 +884,38 @@ class ProgressTrackingTask(Task):
         )
         # Log at debug level
-        logger.debug(f"Task {task_id} track progress: {track_name} by {artist}: {percent}%")
+        logger.debug(
+            f"Task {task_id} track progress: {track_name} by {artist}: {percent}%"
+        )

     def _handle_skipped(self, task_id, data, task_info):
         """Handle skipped status from deezspot"""
         # Store skipped history for deezspot callback format
         try:
             if "track" in data:
                 # Individual track skipped - check if we should use children table
                 children_table = task_info.get("children_table")
-                logger.debug(f"Task {task_id}: Skipped track, children_table = '{children_table}'")
+                logger.debug(
+                    f"Task {task_id}: Skipped track, children_table = '{children_table}'"
+                )
                 if children_table:
                     # Part of album/playlist - store progressively in children table
-                    logger.info(f"Task {task_id}: Storing skipped track in children table '{children_table}' (progressive)")
-                    history_manager.store_track_history(data, task_id, "skipped", children_table)
+                    logger.info(
+                        f"Task {task_id}: Storing skipped track in children table '{children_table}' (progressive)"
+                    )
+                    history_manager.store_track_history(
+                        data, task_id, "skipped", children_table
+                    )
                 else:
                     # Individual track download - store in main table
-                    logger.info(f"Task {task_id}: Storing skipped track in main table (individual download)")
+                    logger.info(
+                        f"Task {task_id}: Storing skipped track in main table (individual download)"
+                    )
                     history_manager.store_track_history(data, task_id, "skipped")
         except Exception as e:
             logger.error(f"Failed to store skipped history for task {task_id}: {e}")
         # Extract track info (legacy format support)
         title = data.get("song", "Unknown")
         artist = data.get("artist", "Unknown")
@@ -933,7 +989,7 @@ class ProgressTrackingTask(Task):
     def _handle_error(self, task_id, data, task_info):
         """Handle error status from deezspot"""
         # Store error history for deezspot callback format
         try:
             # Check for album/playlist FIRST since their callbacks contain both parent and track info
@@ -948,18 +1004,26 @@ class ProgressTrackingTask(Task):
             elif "track" in data:
                 # Individual track failed - check if we should use children table
                 children_table = task_info.get("children_table")
-                logger.debug(f"Task {task_id}: Failed track, children_table = '{children_table}'")
+                logger.debug(
+                    f"Task {task_id}: Failed track, children_table = '{children_table}'"
+                )
                 if children_table:
                     # Part of album/playlist - store progressively in children table
-                    logger.info(f"Task {task_id}: Storing failed track in children table '{children_table}' (progressive)")
-                    history_manager.store_track_history(data, task_id, "failed", children_table)
+                    logger.info(
+                        f"Task {task_id}: Storing failed track in children table '{children_table}' (progressive)"
+                    )
+                    history_manager.store_track_history(
+                        data, task_id, "failed", children_table
+                    )
                 else:
                     # Individual track download - store in main table
-                    logger.info(f"Task {task_id}: Storing failed track in main table (individual download)")
+                    logger.info(
+                        f"Task {task_id}: Storing failed track in main table (individual download)"
+                    )
                     history_manager.store_track_history(data, task_id, "failed")
         except Exception as e:
             logger.error(f"Failed to store error history for task {task_id}: {e}")
         # Extract error info (legacy format support)
         message = data.get("message", "Unknown error")
@@ -977,7 +1041,7 @@ class ProgressTrackingTask(Task):
     def _handle_done(self, task_id, data, task_info):
         """Handle done status from deezspot"""
         # Store completion history for deezspot callback format
         try:
             # Check for album/playlist FIRST since their callbacks contain both parent and track info
@@ -992,18 +1056,29 @@ class ProgressTrackingTask(Task):
             elif "track" in data:
                 # Individual track completion - check if we should use children table
                 children_table = task_info.get("children_table")
-                logger.debug(f"Task {task_id}: Completed track, children_table = '{children_table}'")
+                logger.debug(
+                    f"Task {task_id}: Completed track, children_table = '{children_table}'"
+                )
                 if children_table:
                     # Part of album/playlist - store progressively in children table
-                    logger.info(f"Task {task_id}: Storing completed track in children table '{children_table}' (progressive)")
-                    history_manager.store_track_history(data, task_id, "completed", children_table)
+                    logger.info(
+                        f"Task {task_id}: Storing completed track in children table '{children_table}' (progressive)"
+                    )
+                    history_manager.store_track_history(
+                        data, task_id, "completed", children_table
+                    )
                 else:
                     # Individual track download - store in main table
-                    logger.info(f"Task {task_id}: Storing completed track in main table (individual download)")
+                    logger.info(
+                        f"Task {task_id}: Storing completed track in main table (individual download)"
+                    )
                     history_manager.store_track_history(data, task_id, "completed")
         except Exception as e:
-            logger.error(f"Failed to store completion history for task {task_id}: {e}", exc_info=True)
+            logger.error(
+                f"Failed to store completion history for task {task_id}: {e}",
+                exc_info=True,
+            )
         # Extract data (legacy format support)
         content_type = data.get("type", "").lower()
         album = data.get("album", "")
@@ -1177,9 +1252,9 @@ def task_prerun_handler(task_id=None, task=None, *args, **kwargs):
     """Signal handler when a task begins running"""
     try:
         # Skip verbose logging for SSE tasks
-        if task and hasattr(task, 'name') and task.name in ['trigger_sse_update_task']:
+        if task and hasattr(task, "name") and task.name in ["trigger_sse_update_task"]:
            return
         task_info = get_task_info(task_id)
         # Update task status to processing
@@ -1208,9 +1283,9 @@ def task_postrun_handler(
     """Signal handler when a task finishes"""
     try:
         # Skip verbose logging for SSE tasks
-        if task and hasattr(task, 'name') and task.name in ['trigger_sse_update_task']:
+        if task and hasattr(task, "name") and task.name in ["trigger_sse_update_task"]:
            return
         last_status_for_history = get_last_task_status(task_id)
         if last_status_for_history and last_status_for_history.get("status") in [
             ProgressState.COMPLETE,
@@ -1223,9 +1298,7 @@ def task_postrun_handler(
             state == states.REVOKED
             and last_status_for_history.get("status") != ProgressState.CANCELLED
         ):
-            logger.info(
-                f"Task {task_id} was REVOKED (likely cancelled)."
-            )
+            logger.info(f"Task {task_id} was REVOKED (likely cancelled).")
            # return # Let status update proceed if necessary
         task_info = get_task_info(task_id)
@@ -1235,8 +1308,13 @@ def task_postrun_handler(
         # If task was cancelled/revoked, finalize parent history with partial summary
         try:
-            if state == states.REVOKED or current_redis_status == ProgressState.CANCELLED:
-                parent_type = (task_info.get("download_type") or task_info.get("type") or "").lower()
+            if (
+                state == states.REVOKED
+                or current_redis_status == ProgressState.CANCELLED
+            ):
+                parent_type = (
+                    task_info.get("download_type") or task_info.get("type") or ""
+                ).lower()
                 if parent_type in ["album", "playlist"]:
                     # Build detailed summary from the task log
                     summary = build_partial_summary_from_task_log(task_id, parent_type)
@@ -1247,14 +1325,24 @@ def task_postrun_handler(
                     # Try to enrich parent payload with initial callback object (to capture artists, ids, images)
                     try:
                         log_lines = _read_task_log_json_lines(task_id)
-                        initial_parent = _extract_initial_parent_object(log_lines, parent_type)
+                        initial_parent = _extract_initial_parent_object(
+                            log_lines, parent_type
+                        )
                     except Exception:
                         initial_parent = None
                     if parent_type == "album":
                         album_payload = {"title": title, "total_tracks": total_tracks}
                         if isinstance(initial_parent, dict):
-                            for k in ["artists", "ids", "images", "release_date", "genres", "album_type", "tracks"]:
+                            for k in [
+                                "artists",
+                                "ids",
+                                "images",
+                                "release_date",
+                                "genres",
+                                "album_type",
+                                "tracks",
+                            ]:
                                 if k in initial_parent:
                                     album_payload[k] = initial_parent.get(k)
                         # Ensure a main history entry exists even on cancellation
@@ -1266,7 +1354,13 @@ def task_postrun_handler(
                     else:
                         playlist_payload = {"title": title}
                         if isinstance(initial_parent, dict):
-                            for k in ["owner", "ids", "images", "tracks", "description"]:
+                            for k in [
+                                "owner",
+                                "ids",
+                                "images",
+                                "tracks",
+                                "description",
+                            ]:
                                 if k in initial_parent:
                                     playlist_payload[k] = initial_parent.get(k)
                         history_manager.store_playlist_history(
@@ -1314,14 +1408,17 @@ def task_postrun_handler(
                 try:
                     # Use task_id as primary source for metadata extraction
                     add_single_track_to_playlist_db(
                         playlist_spotify_id=playlist_id,
                         track_item_for_db=track_item_for_db,  # Keep as fallback
-                        task_id=task_id  # Primary source for metadata
+                        task_id=task_id,  # Primary source for metadata
                    )
                     # Update the playlist's m3u file after successful track addition
                     try:
-                        from routes.utils.watch.manager import update_playlist_m3u_file
+                        from routes.utils.watch.manager import (
+                            update_playlist_m3u_file,
+                        )
                         logger.info(
                             f"Updating m3u file for playlist {playlist_id} after successful track download."
                         )
@@ -1331,7 +1428,7 @@ def task_postrun_handler(
                             f"Failed to update m3u file for playlist {playlist_id} after successful track download task {task_id}: {m3u_update_err}",
                             exc_info=True,
                         )
                 except Exception as db_add_err:
                     logger.error(
                         f"Failed to add track to DB for playlist {playlist_id} after successful download task {task_id}: {db_add_err}",
@@ -1390,9 +1487,6 @@ def task_failure_handler(
     if isinstance(exception, Retry):
         return
-    # Define download task names
-    download_task_names = ["download_track", "download_album", "download_playlist"]
     # Get task info and status
     task_info = get_task_info(task_id)
     last_status = get_last_task_status(task_id)
@@ -1523,6 +1617,12 @@ def download_track(self, **task_data):
         save_cover = task_data.get("save_cover", config_params.get("save_cover", True))
         convert_to = task_data.get("convertTo", config_params.get("convertTo"))
         bitrate = task_data.get("bitrate", config_params.get("bitrate"))
+        recursive_quality = task_data.get(
+            "recursive_quality", config_params.get("recursive_quality", False)
+        )
+        artist_separator = task_data.get(
+            "artist_separator", config_params.get("artist_separator", "; ")
+        )
         # Execute the download - service is now determined from URL
         download_track_func(
@@ -1539,6 +1639,8 @@ def download_track(self, **task_data):
             progress_callback=self.progress_callback,
             convert_to=convert_to,
             bitrate=bitrate,
+            recursive_quality=recursive_quality,
+            artist_separator=artist_separator,
             _is_celery_task_execution=True,  # Skip duplicate check inside Celery task (consistency)
         )
@@ -1610,6 +1712,12 @@ def download_album(self, **task_data):
         save_cover = task_data.get("save_cover", config_params.get("save_cover", True))
         convert_to = task_data.get("convertTo", config_params.get("convertTo"))
         bitrate = task_data.get("bitrate", config_params.get("bitrate"))
+        recursive_quality = task_data.get(
+            "recursive_quality", config_params.get("recursive_quality", False)
+        )
+        artist_separator = task_data.get(
+            "artist_separator", config_params.get("artist_separator", "; ")
+        )
         # Execute the download - service is now determined from URL
         download_album_func(
@@ -1626,6 +1734,8 @@ def download_album(self, **task_data):
             progress_callback=self.progress_callback,
             convert_to=convert_to,
             bitrate=bitrate,
+            recursive_quality=recursive_quality,
+            artist_separator=artist_separator,
             _is_celery_task_execution=True,  # Skip duplicate check inside Celery task
         )
@@ -1697,6 +1807,12 @@ def download_playlist(self, **task_data):
         save_cover = task_data.get("save_cover", config_params.get("save_cover", True))
         convert_to = task_data.get("convertTo", config_params.get("convertTo"))
         bitrate = task_data.get("bitrate", config_params.get("bitrate"))
+        recursive_quality = task_data.get(
+            "recursive_quality", config_params.get("recursive_quality", False)
+        )
+        artist_separator = task_data.get(
+            "artist_separator", config_params.get("artist_separator", "; ")
+        )
         # Get retry parameters
         initial_retry_delay = task_data.get(
@@ -1725,6 +1841,8 @@ def download_playlist(self, **task_data):
             progress_callback=self.progress_callback,
             convert_to=convert_to,
             bitrate=bitrate,
+            recursive_quality=recursive_quality,
+            artist_separator=artist_separator,
             _is_celery_task_execution=True,  # Skip duplicate check inside Celery task
         )
@@ -1868,11 +1986,7 @@ def delayed_delete_task_data(task_id, reason):
     delete_task_data_and_log(task_id, reason)

-@celery_app.task(
-    name="trigger_sse_update_task",
-    queue="utility_tasks",
-    bind=True
-)
+@celery_app.task(name="trigger_sse_update_task", queue="utility_tasks", bind=True)
 def trigger_sse_update_task(self, task_id: str, reason: str = "status_update"):
     """
     Dedicated Celery task for triggering SSE task summary updates.
@@ -1880,35 +1994,41 @@ def trigger_sse_update_task(self, task_id: str, reason: str = "status_update"):
     """
     try:
         # Send task summary update via Redis pub/sub
-        logger.debug(f"SSE Task: Processing summary update for task {task_id} (reason: {reason})")
+        logger.debug(
+            f"SSE Task: Processing summary update for task {task_id} (reason: {reason})"
+        )
         event_data = {
             "task_id": task_id,
             "reason": reason,
             "timestamp": time.time(),
             "change_type": "task_summary",
-            "event_type": "summary_update"
+            "event_type": "summary_update",
        }
         # Use Redis pub/sub for cross-process communication
         redis_client.publish("sse_events", json.dumps(event_data))
         logger.debug(f"SSE Task: Published summary update for task {task_id}")
     except Exception as e:
         # Only log errors, not success cases
-        logger.error(f"SSE Task: Failed to publish summary update for task {task_id}: {e}", exc_info=True)
+        logger.error(
+            f"SSE Task: Failed to publish summary update for task {task_id}: {e}",
+            exc_info=True,
+        )
         # Don't raise exception to avoid task retry - SSE updates are best-effort
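
Review note: the consumer side of this channel is not part of the diff; a minimal, hypothetical listener for the same "sse_events" channel and payload:

    import json
    import redis

    pubsub = redis.Redis().pubsub()
    pubsub.subscribe("sse_events")  # channel published to by trigger_sse_update_task

    for message in pubsub.listen():
        if message["type"] != "message":
            continue  # skip subscribe confirmations
        event = json.loads(message["data"])
        # Fields set by the task above: task_id, reason, timestamp, change_type, event_type
        print(event["task_id"], event["event_type"])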
 def _extract_initial_parent_object(log_lines: list, parent_type: str) -> dict | None:
     """Return the first album/playlist object from the log's initializing callback, if present."""
-    key = "album" if parent_type == "album" else ("playlist" if parent_type == "playlist" else None)
+    key = (
+        "album"
+        if parent_type == "album"
+        else ("playlist" if parent_type == "playlist" else None)
+    )
     if not key:
         return None
     for obj in log_lines:
         if key in obj and isinstance(obj[key], dict):
             return obj[key]
     return None
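
Review note: the cancellation summary built above reduces to set arithmetic over log keys; a toy illustration with invented keys (ids.spotify, or the "index:title" fallback used by the helpers):

    initial = {"sp1", "sp2", "2:Intro"}         # tracks from the initializing callback
    completed = {"sp1"}                          # tracks with a "done" status
    skipped = {"2:Intro"}                        # tracks with a "skipped" status

    failed = initial - (completed | skipped)     # the remainder is reported as "cancelled"
    assert failed == {"sp2"}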

View File

@@ -1,12 +1,9 @@
 import spotipy
 from spotipy.oauth2 import SpotifyClientCredentials
-from routes.utils.celery_queue_manager import get_config_params
-from routes.utils.credentials import get_credential, _get_global_spotify_api_creds
+from routes.utils.credentials import _get_global_spotify_api_creds
 import logging
 import time
-from typing import Dict, List, Optional, Any
-import json
-from pathlib import Path
+from typing import Dict, Optional, Any

 # Import Deezer API and logging
 from deezspot.deezloader.dee_api import API as DeezerAPI
@@ -19,19 +16,21 @@ _spotify_client = None
 _last_client_init = 0
 _client_init_interval = 3600  # Reinitialize client every hour

 def _get_spotify_client():
     """
     Get or create a Spotify client with global credentials.
     Implements client reuse and periodic reinitialization.
     """
     global _spotify_client, _last_client_init
     current_time = time.time()
     # Reinitialize client if it's been more than an hour or if client doesn't exist
-    if (_spotify_client is None or
-            current_time - _last_client_init > _client_init_interval):
+    if (
+        _spotify_client is None
+        or current_time - _last_client_init > _client_init_interval
+    ):
         client_id, client_secret = _get_global_spotify_api_creds()
         if not client_id or not client_secret:
@@ -42,177 +41,198 @@ def _get_spotify_client():
# Create new client # Create new client
_spotify_client = spotipy.Spotify( _spotify_client = spotipy.Spotify(
client_credentials_manager=SpotifyClientCredentials( client_credentials_manager=SpotifyClientCredentials(
client_id=client_id, client_id=client_id, client_secret=client_secret
client_secret=client_secret
) )
) )
_last_client_init = current_time _last_client_init = current_time
logger.info("Spotify client initialized/reinitialized") logger.info("Spotify client initialized/reinitialized")
return _spotify_client return _spotify_client
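The reuse-plus-periodic-refresh pattern above is worth seeing in isolation. A minimal standalone sketch of the same idea, assuming nothing beyond the standard library (all names here are illustrative, not from the codebase):

import time

_client = None
_inited_at = 0.0
_TTL = 3600  # rebuild once per hour, mirroring _client_init_interval

def get_cached_client(factory):
    # Return a shared client object, rebuilding it when the TTL has elapsed.
    global _client, _inited_at
    now = time.time()
    if _client is None or now - _inited_at > _TTL:
        _client = factory()
        _inited_at = now
    return _client

The factory is only invoked on the first call and again after the TTL elapses, so repeated lookups stay cheap while stale credentials still get refreshed.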
def _rate_limit_handler(func): def _rate_limit_handler(func):
""" """
Decorator to handle rate limiting with exponential backoff. Decorator to handle rate limiting with exponential backoff.
""" """
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
max_retries = 3 max_retries = 3
base_delay = 1 base_delay = 1
for attempt in range(max_retries): for attempt in range(max_retries):
try: try:
return func(*args, **kwargs) return func(*args, **kwargs)
except Exception as e: except Exception as e:
if "429" in str(e) or "rate limit" in str(e).lower(): if "429" in str(e) or "rate limit" in str(e).lower():
if attempt < max_retries - 1: if attempt < max_retries - 1:
delay = base_delay * (2 ** attempt) delay = base_delay * (2**attempt)
logger.warning(f"Rate limited, retrying in {delay} seconds...") logger.warning(f"Rate limited, retrying in {delay} seconds...")
time.sleep(delay) time.sleep(delay)
continue continue
raise e raise e
return func(*args, **kwargs) return func(*args, **kwargs)
return wrapper return wrapper
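With the defaults above (max_retries=3, base_delay=1), a rate-limited call sleeps 1s and then 2s before its final attempt. A tiny sketch of just that schedule:

def backoff_delays(max_retries: int = 3, base_delay: float = 1.0) -> list[float]:
    # Delay before each retry: base_delay * 2**attempt.
    return [base_delay * (2**attempt) for attempt in range(max_retries - 1)]

print(backoff_delays())  # [1.0, 2.0]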
@_rate_limit_handler @_rate_limit_handler
def get_playlist_metadata(playlist_id: str) -> Dict[str, Any]: def get_playlist_metadata(playlist_id: str) -> Dict[str, Any]:
""" """
Get playlist metadata only (no tracks) to avoid rate limiting. Get playlist metadata only (no tracks) to avoid rate limiting.
Args: Args:
playlist_id: The Spotify playlist ID playlist_id: The Spotify playlist ID
Returns: Returns:
Dictionary with playlist metadata (name, description, owner, etc.) Dictionary with playlist metadata (name, description, owner, etc.)
""" """
client = _get_spotify_client() client = _get_spotify_client()
try: try:
# Get basic playlist info without tracks # Get basic playlist info without tracks
playlist = client.playlist(playlist_id, fields="id,name,description,owner,images,snapshot_id,public,followers,tracks.total") playlist = client.playlist(
playlist_id,
fields="id,name,description,owner,images,snapshot_id,public,followers,tracks.total",
)
# Add a flag to indicate this is metadata only # Add a flag to indicate this is metadata only
playlist['_metadata_only'] = True playlist["_metadata_only"] = True
playlist['_tracks_loaded'] = False playlist["_tracks_loaded"] = False
logger.debug(f"Retrieved playlist metadata for {playlist_id}: {playlist.get('name', 'Unknown')}") logger.debug(
f"Retrieved playlist metadata for {playlist_id}: {playlist.get('name', 'Unknown')}"
)
return playlist return playlist
except Exception as e: except Exception as e:
logger.error(f"Error fetching playlist metadata for {playlist_id}: {e}") logger.error(f"Error fetching playlist metadata for {playlist_id}: {e}")
raise raise
@_rate_limit_handler @_rate_limit_handler
def get_playlist_tracks(playlist_id: str, limit: int = 100, offset: int = 0) -> Dict[str, Any]: def get_playlist_tracks(
playlist_id: str, limit: int = 100, offset: int = 0
) -> Dict[str, Any]:
""" """
Get playlist tracks with pagination support to handle large playlists efficiently. Get playlist tracks with pagination support to handle large playlists efficiently.
Args: Args:
playlist_id: The Spotify playlist ID playlist_id: The Spotify playlist ID
limit: Number of tracks to fetch per request (max 100) limit: Number of tracks to fetch per request (max 100)
offset: Starting position for pagination offset: Starting position for pagination
Returns: Returns:
Dictionary with tracks data Dictionary with tracks data
""" """
client = _get_spotify_client() client = _get_spotify_client()
try: try:
# Get tracks with specified limit and offset # Get tracks with specified limit and offset
tracks_data = client.playlist_tracks( tracks_data = client.playlist_tracks(
playlist_id, playlist_id,
limit=min(limit, 100), # Spotify API max is 100 limit=min(limit, 100), # Spotify API max is 100
offset=offset, offset=offset,
fields="items(track(id,name,artists,album,external_urls,preview_url,duration_ms,explicit,popularity)),total,limit,offset" fields="items(track(id,name,artists,album,external_urls,preview_url,duration_ms,explicit,popularity)),total,limit,offset",
)
logger.debug(
f"Retrieved {len(tracks_data.get('items', []))} tracks for playlist {playlist_id} (offset: {offset})"
) )
logger.debug(f"Retrieved {len(tracks_data.get('items', []))} tracks for playlist {playlist_id} (offset: {offset})")
return tracks_data return tracks_data
except Exception as e: except Exception as e:
logger.error(f"Error fetching playlist tracks for {playlist_id}: {e}") logger.error(f"Error fetching playlist tracks for {playlist_id}: {e}")
raise raise
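A hypothetical caller that walks an entire playlist with this function (the playlist ID is a placeholder and global credentials are assumed to be configured):

offset = 0
while True:
    page = get_playlist_tracks("PLAYLIST_ID", limit=100, offset=offset)
    items = page.get("items", [])
    if not items:
        break
    for item in items:
        track = item.get("track") or {}
        print(track.get("name"))
    offset += len(items)  # advance by what was actually returned

This is essentially the loop get_playlist_full runs internally, minus the courtesy delay between batches.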
@_rate_limit_handler @_rate_limit_handler
def get_playlist_full(playlist_id: str, batch_size: int = 100) -> Dict[str, Any]: def get_playlist_full(playlist_id: str, batch_size: int = 100) -> Dict[str, Any]:
""" """
Get complete playlist data with all tracks, using batched requests to avoid rate limiting. Get complete playlist data with all tracks, using batched requests to avoid rate limiting.
Args: Args:
playlist_id: The Spotify playlist ID playlist_id: The Spotify playlist ID
batch_size: Number of tracks to fetch per batch (max 100) batch_size: Number of tracks to fetch per batch (max 100)
Returns: Returns:
Complete playlist data with all tracks Complete playlist data with all tracks
""" """
client = _get_spotify_client()
try: try:
# First get metadata # First get metadata
playlist = get_playlist_metadata(playlist_id) playlist = get_playlist_metadata(playlist_id)
# Get total track count # Get total track count
total_tracks = playlist.get('tracks', {}).get('total', 0) total_tracks = playlist.get("tracks", {}).get("total", 0)
if total_tracks == 0: if total_tracks == 0:
playlist['tracks'] = {'items': [], 'total': 0} playlist["tracks"] = {"items": [], "total": 0}
return playlist return playlist
# Fetch all tracks in batches # Fetch all tracks in batches
all_tracks = [] all_tracks = []
offset = 0 offset = 0
while offset < total_tracks: while offset < total_tracks:
batch = get_playlist_tracks(playlist_id, limit=batch_size, offset=offset) batch = get_playlist_tracks(playlist_id, limit=batch_size, offset=offset)
batch_items = batch.get('items', []) batch_items = batch.get("items", [])
all_tracks.extend(batch_items) all_tracks.extend(batch_items)
offset += len(batch_items) offset += len(batch_items)
# Add small delay between batches to be respectful to API # Add small delay between batches to be respectful to API
if offset < total_tracks: if offset < total_tracks:
time.sleep(0.1) time.sleep(0.1)
# Update playlist with complete tracks data # Update playlist with complete tracks data
playlist['tracks'] = { playlist["tracks"] = {
'items': all_tracks, "items": all_tracks,
'total': total_tracks, "total": total_tracks,
'limit': batch_size, "limit": batch_size,
'offset': 0 "offset": 0,
} }
playlist['_metadata_only'] = False playlist["_metadata_only"] = False
playlist['_tracks_loaded'] = True playlist["_tracks_loaded"] = True
logger.info(f"Retrieved complete playlist {playlist_id} with {total_tracks} tracks") logger.info(
f"Retrieved complete playlist {playlist_id} with {total_tracks} tracks"
)
return playlist return playlist
except Exception as e: except Exception as e:
logger.error(f"Error fetching complete playlist {playlist_id}: {e}") logger.error(f"Error fetching complete playlist {playlist_id}: {e}")
raise raise
def check_playlist_updated(playlist_id: str, last_snapshot_id: str) -> bool: def check_playlist_updated(playlist_id: str, last_snapshot_id: str) -> bool:
""" """
Check if playlist has been updated by comparing snapshot_id. Check if playlist has been updated by comparing snapshot_id.
This is much more efficient than fetching all tracks. This is much more efficient than fetching all tracks.
Args: Args:
playlist_id: The Spotify playlist ID playlist_id: The Spotify playlist ID
last_snapshot_id: The last known snapshot_id last_snapshot_id: The last known snapshot_id
Returns: Returns:
True if playlist has been updated, False otherwise True if playlist has been updated, False otherwise
""" """
try: try:
metadata = get_playlist_metadata(playlist_id) metadata = get_playlist_metadata(playlist_id)
current_snapshot_id = metadata.get('snapshot_id') current_snapshot_id = metadata.get("snapshot_id")
return current_snapshot_id != last_snapshot_id return current_snapshot_id != last_snapshot_id
except Exception as e: except Exception as e:
logger.error(f"Error checking playlist update status for {playlist_id}: {e}") logger.error(f"Error checking playlist update status for {playlist_id}: {e}")
raise raise
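A sketch of how a watcher might use this check to avoid any track fetching at all (placeholder ID; the stored snapshot_id would normally come from the database):

last_known = "abc123"  # hypothetical snapshot_id persisted from the previous run
if check_playlist_updated("PLAYLIST_ID", last_known):
    playlist = get_playlist_full("PLAYLIST_ID")  # metadata call plus batched tracks
else:
    print("Playlist unchanged; skipped fetching tracks entirely.")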
@_rate_limit_handler @_rate_limit_handler
def get_spotify_info(spotify_id: str, spotify_type: str, limit: Optional[int] = None, offset: Optional[int] = None) -> Dict[str, Any]: def get_spotify_info(
spotify_id: str,
spotify_type: str,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> Dict[str, Any]:
""" """
Get info from Spotify API using Spotipy directly. Get info from Spotify API using Spotipy directly.
Optimized to prevent rate limiting by using appropriate endpoints. Optimized to prevent rate limiting by using appropriate endpoints.
@@ -227,37 +247,35 @@ def get_spotify_info(spotify_id: str, spotify_type: str, limit: Optional[int] =
Dictionary with the entity information Dictionary with the entity information
""" """
client = _get_spotify_client() client = _get_spotify_client()
try: try:
if spotify_type == "track": if spotify_type == "track":
return client.track(spotify_id) return client.track(spotify_id)
elif spotify_type == "album": elif spotify_type == "album":
return client.album(spotify_id) return client.album(spotify_id)
elif spotify_type == "playlist": elif spotify_type == "playlist":
# Use optimized playlist fetching # Use optimized playlist fetching
return get_playlist_full(spotify_id) return get_playlist_full(spotify_id)
elif spotify_type == "playlist_metadata": elif spotify_type == "playlist_metadata":
# Get only metadata for playlists # Get only metadata for playlists
return get_playlist_metadata(spotify_id) return get_playlist_metadata(spotify_id)
elif spotify_type == "artist": elif spotify_type == "artist":
return client.artist(spotify_id) return client.artist(spotify_id)
elif spotify_type == "artist_discography": elif spotify_type == "artist_discography":
# Get artist's albums with pagination # Get artist's albums with pagination
albums = client.artist_albums( albums = client.artist_albums(
spotify_id, spotify_id, limit=limit or 20, offset=offset or 0
limit=limit or 20,
offset=offset or 0
) )
return albums return albums
elif spotify_type == "episode": elif spotify_type == "episode":
return client.episode(spotify_id) return client.episode(spotify_id)
else: else:
raise ValueError(f"Unsupported Spotify type: {spotify_type}") raise ValueError(f"Unsupported Spotify type: {spotify_type}")
@@ -265,17 +283,19 @@ def get_spotify_info(spotify_id: str, spotify_type: str, limit: Optional[int] =
logger.error(f"Error fetching {spotify_type} {spotify_id}: {e}") logger.error(f"Error fetching {spotify_type} {spotify_id}: {e}")
raise raise
# Cache for playlist metadata to reduce API calls # Cache for playlist metadata to reduce API calls
_playlist_metadata_cache = {} _playlist_metadata_cache: Dict[str, tuple[Dict[str, Any], float]] = {}
_cache_ttl = 300 # 5 minutes cache _cache_ttl = 300 # 5 minutes cache
def get_cached_playlist_metadata(playlist_id: str) -> Optional[Dict[str, Any]]: def get_cached_playlist_metadata(playlist_id: str) -> Optional[Dict[str, Any]]:
""" """
Get playlist metadata from cache if available and not expired. Get playlist metadata from cache if available and not expired.
Args: Args:
playlist_id: The Spotify playlist ID playlist_id: The Spotify playlist ID
Returns: Returns:
Cached metadata or None if not available/expired Cached metadata or None if not available/expired
""" """
@@ -283,44 +303,48 @@ def get_cached_playlist_metadata(playlist_id: str) -> Optional[Dict[str, Any]]:
cached_data, timestamp = _playlist_metadata_cache[playlist_id] cached_data, timestamp = _playlist_metadata_cache[playlist_id]
if time.time() - timestamp < _cache_ttl: if time.time() - timestamp < _cache_ttl:
return cached_data return cached_data
return None return None
def cache_playlist_metadata(playlist_id: str, metadata: Dict[str, Any]): def cache_playlist_metadata(playlist_id: str, metadata: Dict[str, Any]):
""" """
Cache playlist metadata with timestamp. Cache playlist metadata with timestamp.
Args: Args:
playlist_id: The Spotify playlist ID playlist_id: The Spotify playlist ID
metadata: The metadata to cache metadata: The metadata to cache
""" """
_playlist_metadata_cache[playlist_id] = (metadata, time.time()) _playlist_metadata_cache[playlist_id] = (metadata, time.time())
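The (value, timestamp) tuple scheme is simple enough to demonstrate standalone; a minimal sketch assuming only the standard library:

import time

cache: dict[str, tuple[dict, float]] = {}
TTL = 300  # seconds, mirroring _cache_ttl

def cache_put(key: str, value: dict) -> None:
    cache[key] = (value, time.time())

def cache_get(key: str) -> dict | None:
    entry = cache.get(key)
    if entry and time.time() - entry[1] < TTL:
        return entry[0]
    return None  # missing or expired

cache_put("pl1", {"name": "Example"})
assert cache_get("pl1") == {"name": "Example"}

Expired entries are simply ignored on read rather than evicted, which is fine for a small per-process cache like this one.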
def get_playlist_info_optimized(playlist_id: str, include_tracks: bool = False) -> Dict[str, Any]:
def get_playlist_info_optimized(
playlist_id: str, include_tracks: bool = False
) -> Dict[str, Any]:
""" """
Optimized playlist info function that uses caching and selective loading. Optimized playlist info function that uses caching and selective loading.
Args: Args:
playlist_id: The Spotify playlist ID playlist_id: The Spotify playlist ID
include_tracks: Whether to include track data (default: False to save API calls) include_tracks: Whether to include track data (default: False to save API calls)
Returns: Returns:
Playlist data with or without tracks Playlist data with or without tracks
""" """
# Check cache first # Check cache first
cached_metadata = get_cached_playlist_metadata(playlist_id) cached_metadata = get_cached_playlist_metadata(playlist_id)
if cached_metadata and not include_tracks: if cached_metadata and not include_tracks:
logger.debug(f"Returning cached metadata for playlist {playlist_id}") logger.debug(f"Returning cached metadata for playlist {playlist_id}")
return cached_metadata return cached_metadata
if include_tracks: if include_tracks:
# Get complete playlist data # Get complete playlist data
playlist_data = get_playlist_full(playlist_id) playlist_data = get_playlist_full(playlist_id)
# Cache the metadata portion # Cache the metadata portion
metadata_only = {k: v for k, v in playlist_data.items() if k != 'tracks'} metadata_only = {k: v for k, v in playlist_data.items() if k != "tracks"}
metadata_only['_metadata_only'] = True metadata_only["_metadata_only"] = True
metadata_only['_tracks_loaded'] = False metadata_only["_tracks_loaded"] = False
cache_playlist_metadata(playlist_id, metadata_only) cache_playlist_metadata(playlist_id, metadata_only)
return playlist_data return playlist_data
else: else:
@@ -329,6 +353,7 @@ def get_playlist_info_optimized(playlist_id: str, include_tracks: bool = False)
cache_playlist_metadata(playlist_id, metadata) cache_playlist_metadata(playlist_id, metadata)
return metadata return metadata
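A hypothetical usage of the two modes (metadata-only is the cheap default; tracks are loaded only on request):

meta = get_playlist_info_optimized("PLAYLIST_ID")  # one metadata call, cached for 5 minutes
print(meta["name"], meta.get("tracks", {}).get("total"))

full = get_playlist_info_optimized("PLAYLIST_ID", include_tracks=True)
print(len(full["tracks"]["items"]))  # all items, fetched in 100-track batches

The .get() chain on the metadata result is deliberate: the cached metadata dict produced after a full fetch has its "tracks" key stripped.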
# Keep the existing Deezer functions unchanged # Keep the existing Deezer functions unchanged
def get_deezer_info(deezer_id, deezer_type, limit=None): def get_deezer_info(deezer_id, deezer_type, limit=None):
""" """

File diff suppressed because it is too large

View File

@@ -24,13 +24,17 @@ def download_playlist(
progress_callback=None, progress_callback=None,
convert_to=None, convert_to=None,
bitrate=None, bitrate=None,
artist_separator="; ",
recursive_quality=True,
_is_celery_task_execution=False, # Added to skip duplicate check from Celery task _is_celery_task_execution=False, # Added to skip duplicate check from Celery task
): ):
if not _is_celery_task_execution: if not _is_celery_task_execution:
existing_task = get_existing_task_id(url) # Check for duplicates only if not called by Celery task existing_task = get_existing_task_id(
url
) # Check for duplicates only if not called by Celery task
if existing_task: if existing_task:
raise DuplicateDownloadError( raise DuplicateDownloadError(
f"Download for this URL is already in progress.", "Download for this URL is already in progress.",
existing_task=existing_task, existing_task=existing_task,
) )
try: try:
@@ -93,7 +97,7 @@ def download_playlist(
link_playlist=url, # Spotify URL link_playlist=url, # Spotify URL
output_dir="./downloads", output_dir="./downloads",
quality_download=quality, # Deezer quality quality_download=quality, # Deezer quality
recursive_quality=True, recursive_quality=recursive_quality,
recursive_download=False, recursive_download=False,
not_interface=False, not_interface=False,
make_zip=False, make_zip=False,
@@ -106,6 +110,7 @@ def download_playlist(
max_retries=max_retries, max_retries=max_retries,
convert_to=convert_to, convert_to=convert_to,
bitrate=bitrate, bitrate=bitrate,
artist_separator=artist_separator,
) )
print( print(
f"DEBUG: playlist.py - Playlist download via Deezer (account: {fallback}) successful for Spotify URL." f"DEBUG: playlist.py - Playlist download via Deezer (account: {fallback}) successful for Spotify URL."
@@ -153,7 +158,7 @@ def download_playlist(
link_playlist=url, # Spotify URL link_playlist=url, # Spotify URL
output_dir="./downloads", output_dir="./downloads",
quality_download=fall_quality, # Spotify quality quality_download=fall_quality, # Spotify quality
recursive_quality=True, recursive_quality=recursive_quality,
recursive_download=False, recursive_download=False,
not_interface=False, not_interface=False,
make_zip=False, make_zip=False,
@@ -167,6 +172,7 @@ def download_playlist(
max_retries=max_retries, max_retries=max_retries,
convert_to=convert_to, convert_to=convert_to,
bitrate=bitrate, bitrate=bitrate,
artist_separator=artist_separator,
) )
print( print(
f"DEBUG: playlist.py - Spotify direct download (account: {main} for blob) successful." f"DEBUG: playlist.py - Spotify direct download (account: {main} for blob) successful."
@@ -213,7 +219,7 @@ def download_playlist(
link_playlist=url, link_playlist=url,
output_dir="./downloads", output_dir="./downloads",
quality_download=quality, quality_download=quality,
recursive_quality=True, recursive_quality=recursive_quality,
recursive_download=False, recursive_download=False,
not_interface=False, not_interface=False,
make_zip=False, make_zip=False,
@@ -227,6 +233,7 @@ def download_playlist(
max_retries=max_retries, max_retries=max_retries,
convert_to=convert_to, convert_to=convert_to,
bitrate=bitrate, bitrate=bitrate,
artist_separator=artist_separator,
) )
print( print(
f"DEBUG: playlist.py - Direct Spotify download (account: {main} for blob) successful." f"DEBUG: playlist.py - Direct Spotify download (account: {main} for blob) successful."
@@ -254,7 +261,7 @@ def download_playlist(
link_playlist=url, link_playlist=url,
output_dir="./downloads", output_dir="./downloads",
quality_download=quality, quality_download=quality,
recursive_quality=False, # Usually False for playlists to get individual track qualities recursive_quality=recursive_quality, # Usually False for playlists to get individual track qualities
recursive_download=False, recursive_download=False,
make_zip=False, make_zip=False,
custom_dir_format=custom_dir_format, custom_dir_format=custom_dir_format,
@@ -266,6 +273,7 @@ def download_playlist(
max_retries=max_retries, max_retries=max_retries,
convert_to=convert_to, convert_to=convert_to,
bitrate=bitrate, bitrate=bitrate,
artist_separator=artist_separator,
) )
print( print(
f"DEBUG: playlist.py - Direct Deezer download (account: {main}) successful." f"DEBUG: playlist.py - Direct Deezer download (account: {main}) successful."

View File

@@ -25,6 +25,8 @@ def download_track(
progress_callback=None, progress_callback=None,
convert_to=None, convert_to=None,
bitrate=None, bitrate=None,
artist_separator="; ",
recursive_quality=False,
_is_celery_task_execution=False, # Added for consistency, not currently used for duplicate check _is_celery_task_execution=False, # Added for consistency, not currently used for duplicate check
): ):
try: try:
@@ -91,7 +93,7 @@ def download_track(
link_track=url, # Spotify URL link_track=url, # Spotify URL
output_dir="./downloads", output_dir="./downloads",
quality_download=quality, # Deezer quality quality_download=quality, # Deezer quality
recursive_quality=False, recursive_quality=recursive_quality,
recursive_download=False, recursive_download=False,
not_interface=False, not_interface=False,
custom_dir_format=custom_dir_format, custom_dir_format=custom_dir_format,
@@ -102,6 +104,7 @@ def download_track(
max_retries=max_retries, max_retries=max_retries,
convert_to=convert_to, convert_to=convert_to,
bitrate=bitrate, bitrate=bitrate,
artist_separator=artist_separator,
) )
print( print(
f"DEBUG: track.py - Track download via Deezer (account: {fallback}) successful for Spotify URL." f"DEBUG: track.py - Track download via Deezer (account: {fallback}) successful for Spotify URL."
@@ -147,7 +150,7 @@ def download_track(
link_track=url, # Spotify URL link_track=url, # Spotify URL
output_dir="./downloads", output_dir="./downloads",
quality_download=fall_quality, # Spotify quality quality_download=fall_quality, # Spotify quality
recursive_quality=False, recursive_quality=recursive_quality,
recursive_download=False, recursive_download=False,
not_interface=False, not_interface=False,
real_time_dl=real_time, real_time_dl=real_time,
@@ -160,6 +163,7 @@ def download_track(
max_retries=max_retries, max_retries=max_retries,
convert_to=convert_to, convert_to=convert_to,
bitrate=bitrate, bitrate=bitrate,
artist_separator=artist_separator,
) )
print( print(
f"DEBUG: track.py - Spotify direct download (account: {main} for blob) successful." f"DEBUG: track.py - Spotify direct download (account: {main} for blob) successful."
@@ -202,7 +206,7 @@ def download_track(
link_track=url, link_track=url,
output_dir="./downloads", output_dir="./downloads",
quality_download=quality, quality_download=quality,
recursive_quality=False, recursive_quality=recursive_quality,
recursive_download=False, recursive_download=False,
not_interface=False, not_interface=False,
real_time_dl=real_time, real_time_dl=real_time,
@@ -215,6 +219,7 @@ def download_track(
max_retries=max_retries, max_retries=max_retries,
convert_to=convert_to, convert_to=convert_to,
bitrate=bitrate, bitrate=bitrate,
artist_separator=artist_separator,
) )
print( print(
f"DEBUG: track.py - Direct Spotify download (account: {main} for blob) successful." f"DEBUG: track.py - Direct Spotify download (account: {main} for blob) successful."
@@ -242,7 +247,7 @@ def download_track(
link_track=url, link_track=url,
output_dir="./downloads", output_dir="./downloads",
quality_download=quality, quality_download=quality,
recursive_quality=False, recursive_quality=recursive_quality,
recursive_download=False, recursive_download=False,
custom_dir_format=custom_dir_format, custom_dir_format=custom_dir_format,
custom_track_format=custom_track_format, custom_track_format=custom_track_format,
@@ -253,6 +258,7 @@ def download_track(
max_retries=max_retries, max_retries=max_retries,
convert_to=convert_to, convert_to=convert_to,
bitrate=bitrate, bitrate=bitrate,
artist_separator=artist_separator,
) )
print( print(
f"DEBUG: track.py - Direct Deezer download (account: {main}) successful." f"DEBUG: track.py - Direct Deezer download (account: {main}) successful."

View File

@@ -166,11 +166,11 @@ def init_playlists_db():
"watched playlists", "watched playlists",
): ):
conn.commit() conn.commit()
# Update all existing playlist track tables with new schema # Update all existing playlist track tables with new schema
_update_all_playlist_track_tables(cursor) _update_all_playlist_track_tables(cursor)
conn.commit() conn.commit()
logger.info( logger.info(
f"Playlists database initialized/updated successfully at {PLAYLISTS_DB_PATH}" f"Playlists database initialized/updated successfully at {PLAYLISTS_DB_PATH}"
) )
@@ -183,9 +183,11 @@ def _update_all_playlist_track_tables(cursor: sqlite3.Cursor):
"""Updates all existing playlist track tables to ensure they have the latest schema.""" """Updates all existing playlist track tables to ensure they have the latest schema."""
try: try:
# Get all table names that start with 'playlist_' # Get all table names that start with 'playlist_'
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'playlist_%'") cursor.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'playlist_%'"
)
playlist_tables = cursor.fetchall() playlist_tables = cursor.fetchall()
for table_row in playlist_tables: for table_row in playlist_tables:
table_name = table_row[0] table_name = table_row[0]
if _ensure_table_schema( if _ensure_table_schema(
@@ -194,8 +196,10 @@ def _update_all_playlist_track_tables(cursor: sqlite3.Cursor):
EXPECTED_PLAYLIST_TRACKS_COLUMNS, EXPECTED_PLAYLIST_TRACKS_COLUMNS,
f"playlist tracks ({table_name})", f"playlist tracks ({table_name})",
): ):
logger.info(f"Updated schema for existing playlist track table: {table_name}") logger.info(
f"Updated schema for existing playlist track table: {table_name}"
)
except sqlite3.Error as e: except sqlite3.Error as e:
logger.error(f"Error updating playlist track tables schema: {e}", exc_info=True) logger.error(f"Error updating playlist track tables schema: {e}", exc_info=True)
@@ -205,7 +209,7 @@ def update_all_existing_tables_schema():
try: try:
with _get_playlists_db_connection() as conn: with _get_playlists_db_connection() as conn:
cursor = conn.cursor() cursor = conn.cursor()
# Update main watched_playlists table # Update main watched_playlists table
if _ensure_table_schema( if _ensure_table_schema(
cursor, cursor,
@@ -214,13 +218,15 @@ def update_all_existing_tables_schema():
"watched playlists", "watched playlists",
): ):
logger.info("Updated schema for watched_playlists table") logger.info("Updated schema for watched_playlists table")
# Update all playlist track tables # Update all playlist track tables
_update_all_playlist_track_tables(cursor) _update_all_playlist_track_tables(cursor)
conn.commit() conn.commit()
logger.info("Successfully updated all existing tables schema in playlists database") logger.info(
"Successfully updated all existing tables schema in playlists database"
)
except sqlite3.Error as e: except sqlite3.Error as e:
logger.error(f"Error updating existing tables schema: {e}", exc_info=True) logger.error(f"Error updating existing tables schema: {e}", exc_info=True)
raise raise
@@ -232,15 +238,17 @@ def ensure_playlist_table_schema(playlist_spotify_id: str):
try: try:
with _get_playlists_db_connection() as conn: with _get_playlists_db_connection() as conn:
cursor = conn.cursor() cursor = conn.cursor()
# Check if table exists # Check if table exists
cursor.execute( cursor.execute(
f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';" f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';"
) )
if cursor.fetchone() is None: if cursor.fetchone() is None:
logger.warning(f"Table {table_name} does not exist. Cannot update schema.") logger.warning(
f"Table {table_name} does not exist. Cannot update schema."
)
return False return False
# Update schema # Update schema
if _ensure_table_schema( if _ensure_table_schema(
cursor, cursor,
@@ -252,11 +260,16 @@ def ensure_playlist_table_schema(playlist_spotify_id: str):
logger.info(f"Updated schema for playlist track table: {table_name}") logger.info(f"Updated schema for playlist track table: {table_name}")
return True return True
else: else:
logger.info(f"Schema already up-to-date for playlist track table: {table_name}") logger.info(
f"Schema already up-to-date for playlist track table: {table_name}"
)
return True return True
except sqlite3.Error as e: except sqlite3.Error as e:
logger.error(f"Error updating schema for playlist {playlist_spotify_id}: {e}", exc_info=True) logger.error(
f"Error updating schema for playlist {playlist_spotify_id}: {e}",
exc_info=True,
)
return False return False
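_ensure_table_schema's body is not shown in this diff, but the pattern these call sites imply — comparing the live table against an expected column map and issuing ALTER TABLE for anything missing — can be sketched as follows (purely illustrative, not the project's actual implementation):

import sqlite3

def ensure_columns(cursor: sqlite3.Cursor, table: str, expected: dict[str, str]) -> bool:
    # Add any missing columns; return True if the schema was changed.
    cursor.execute(f"PRAGMA table_info({table})")
    existing = {row[1] for row in cursor.fetchall()}  # row[1] is the column name
    changed = False
    for name, decl in expected.items():
        if name not in existing:
            cursor.execute(f"ALTER TABLE {table} ADD COLUMN {name} {decl}")
            changed = True
    return changed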
@@ -306,10 +319,10 @@ def add_playlist_to_watch(playlist_data: dict):
"""Adds a playlist to the watched_playlists table and creates its tracks table in playlists.db.""" """Adds a playlist to the watched_playlists table and creates its tracks table in playlists.db."""
try: try:
_create_playlist_tracks_table(playlist_data["id"]) _create_playlist_tracks_table(playlist_data["id"])
# Construct Spotify URL manually since external_urls might not be present in metadata # Construct Spotify URL manually since external_urls might not be present in metadata
spotify_url = f"https://open.spotify.com/playlist/{playlist_data['id']}" spotify_url = f"https://open.spotify.com/playlist/{playlist_data['id']}"
with _get_playlists_db_connection() as conn: # Use playlists connection with _get_playlists_db_connection() as conn: # Use playlists connection
cursor = conn.cursor() cursor = conn.cursor()
cursor.execute( cursor.execute(
@@ -455,10 +468,12 @@ def get_playlist_track_ids_from_db(playlist_spotify_id: str):
return track_ids return track_ids
def get_playlist_tracks_with_snapshot_from_db(playlist_spotify_id: str): def get_playlist_tracks_with_snapshot_from_db(
playlist_spotify_id: str,
) -> dict[str, dict[str, str]]:
"""Retrieves all tracks with their snapshot_ids from a specific playlist's tracks table in playlists.db.""" """Retrieves all tracks with their snapshot_ids from a specific playlist's tracks table in playlists.db."""
table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}" table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
tracks_data = {} tracks_data: dict[str, dict[str, str]] = {}
try: try:
with _get_playlists_db_connection() as conn: # Use playlists connection with _get_playlists_db_connection() as conn: # Use playlists connection
cursor = conn.cursor() cursor = conn.cursor()
@@ -470,7 +485,7 @@ def get_playlist_tracks_with_snapshot_from_db(playlist_spotify_id: str):
f"Track table {table_name} does not exist in {PLAYLISTS_DB_PATH}. Cannot fetch track data." f"Track table {table_name} does not exist in {PLAYLISTS_DB_PATH}. Cannot fetch track data."
) )
return tracks_data return tracks_data
# Ensure the table has the latest schema before querying # Ensure the table has the latest schema before querying
_ensure_table_schema( _ensure_table_schema(
cursor, cursor,
@@ -478,7 +493,7 @@ def get_playlist_tracks_with_snapshot_from_db(playlist_spotify_id: str):
EXPECTED_PLAYLIST_TRACKS_COLUMNS, EXPECTED_PLAYLIST_TRACKS_COLUMNS,
f"playlist tracks ({playlist_spotify_id})", f"playlist tracks ({playlist_spotify_id})",
) )
cursor.execute( cursor.execute(
f"SELECT spotify_track_id, snapshot_id, title FROM {table_name} WHERE is_present_in_spotify = 1" f"SELECT spotify_track_id, snapshot_id, title FROM {table_name} WHERE is_present_in_spotify = 1"
) )
@@ -486,7 +501,7 @@ def get_playlist_tracks_with_snapshot_from_db(playlist_spotify_id: str):
for row in rows: for row in rows:
tracks_data[row["spotify_track_id"]] = { tracks_data[row["spotify_track_id"]] = {
"snapshot_id": row["snapshot_id"], "snapshot_id": row["snapshot_id"],
"title": row["title"] "title": row["title"],
} }
return tracks_data return tracks_data
except sqlite3.Error as e: except sqlite3.Error as e:
@@ -508,7 +523,7 @@ def get_playlist_total_tracks_from_db(playlist_spotify_id: str) -> int:
) )
if cursor.fetchone() is None: if cursor.fetchone() is None:
return 0 return 0
# Ensure the table has the latest schema before querying # Ensure the table has the latest schema before querying
_ensure_table_schema( _ensure_table_schema(
cursor, cursor,
@@ -516,7 +531,7 @@ def get_playlist_total_tracks_from_db(playlist_spotify_id: str) -> int:
EXPECTED_PLAYLIST_TRACKS_COLUMNS, EXPECTED_PLAYLIST_TRACKS_COLUMNS,
f"playlist tracks ({playlist_spotify_id})", f"playlist tracks ({playlist_spotify_id})",
) )
cursor.execute( cursor.execute(
f"SELECT COUNT(*) as count FROM {table_name} WHERE is_present_in_spotify = 1" f"SELECT COUNT(*) as count FROM {table_name} WHERE is_present_in_spotify = 1"
) )
@@ -530,12 +545,14 @@ def get_playlist_total_tracks_from_db(playlist_spotify_id: str) -> int:
return 0 return 0
def add_tracks_to_playlist_db(playlist_spotify_id: str, tracks_data: list, snapshot_id: str = None): def add_tracks_to_playlist_db(
playlist_spotify_id: str, tracks_data: list, snapshot_id: str = None
):
""" """
Updates existing tracks in the playlist's DB table to mark them as currently present Updates existing tracks in the playlist's DB table to mark them as currently present
in Spotify and updates their last_seen timestamp and snapshot_id. Also refreshes metadata. in Spotify and updates their last_seen timestamp and snapshot_id. Also refreshes metadata.
Does NOT insert new tracks. New tracks are only added upon successful download. Does NOT insert new tracks. New tracks are only added upon successful download.
Args: Args:
playlist_spotify_id: The Spotify playlist ID playlist_spotify_id: The Spotify playlist ID
tracks_data: List of track items from Spotify API tracks_data: List of track items from Spotify API
@@ -574,8 +591,10 @@ def add_tracks_to_playlist_db(playlist_spotify_id: str, tracks_data: list, snaps
track_number = track.get("track_number") track_number = track.get("track_number")
# Log the raw track_number value for debugging # Log the raw track_number value for debugging
if track_number is None or track_number == 0: if track_number is None or track_number == 0:
logger.debug(f"Track '{track.get('name', 'Unknown')}' has track_number: {track_number} (raw API value)") logger.debug(
f"Track '{track.get('name', 'Unknown')}' has track_number: {track_number} (raw API value)"
)
# Prepare tuple for UPDATE statement. # Prepare tuple for UPDATE statement.
# Order: title, artist_names, album_name, album_artist_names, track_number, # Order: title, artist_names, album_name, album_artist_names, track_number,
# album_spotify_id, duration_ms, added_at_playlist, # album_spotify_id, duration_ms, added_at_playlist,
@@ -790,11 +809,16 @@ def remove_specific_tracks_from_playlist_table(
return 0 return 0
def add_single_track_to_playlist_db(playlist_spotify_id: str, track_item_for_db: dict, snapshot_id: str = None, task_id: str = None): def add_single_track_to_playlist_db(
playlist_spotify_id: str,
track_item_for_db: dict,
snapshot_id: str = None,
task_id: str = None,
):
""" """
Adds or updates a single track in the specified playlist's tracks table in playlists.db. Adds or updates a single track in the specified playlist's tracks table in playlists.db.
Uses deezspot callback data as the source of metadata. Uses deezspot callback data as the source of metadata.
Args: Args:
playlist_spotify_id: The Spotify playlist ID playlist_spotify_id: The Spotify playlist ID
track_item_for_db: Track item data (used only for spotify_track_id and added_at) track_item_for_db: Track item data (used only for spotify_track_id and added_at)
@@ -802,68 +826,87 @@ def add_single_track_to_playlist_db(playlist_spotify_id: str, track_item_for_db:
task_id: Task ID to extract metadata from callback data task_id: Task ID to extract metadata from callback data
""" """
if not task_id: if not task_id:
logger.error(f"No task_id provided for playlist {playlist_spotify_id}. Task ID is required to extract metadata from deezspot callback.") logger.error(
f"No task_id provided for playlist {playlist_spotify_id}. Task ID is required to extract metadata from deezspot callback."
)
return return
if not track_item_for_db or not track_item_for_db.get("track", {}).get("id"): if not track_item_for_db or not track_item_for_db.get("track", {}).get("id"):
logger.error(f"No track_item_for_db or spotify track ID provided for playlist {playlist_spotify_id}") logger.error(
f"No track_item_for_db or spotify track ID provided for playlist {playlist_spotify_id}"
)
return return
table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}" table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
# Extract metadata ONLY from deezspot callback data # Extract metadata ONLY from deezspot callback data
try: try:
# Import here to avoid circular imports # Import here to avoid circular imports
from routes.utils.celery_tasks import get_last_task_status from routes.utils.celery_tasks import get_last_task_status
last_status = get_last_task_status(task_id) last_status = get_last_task_status(task_id)
if not last_status or "raw_callback" not in last_status: if not last_status or "raw_callback" not in last_status:
logger.error(f"No raw_callback found in task status for task {task_id}. Cannot extract metadata.") logger.error(
f"No raw_callback found in task status for task {task_id}. Cannot extract metadata."
)
return return
callback_data = last_status["raw_callback"] callback_data = last_status["raw_callback"]
# Extract metadata from deezspot callback using correct structure from callbacks.ts # Extract metadata from deezspot callback using correct structure from callbacks.ts
track_obj = callback_data.get("track", {}) track_obj = callback_data.get("track", {})
if not track_obj: if not track_obj:
logger.error(f"No track object found in callback data for task {task_id}") logger.error(f"No track object found in callback data for task {task_id}")
return return
track_name = track_obj.get("title", "N/A") track_name = track_obj.get("title", "N/A")
track_number = track_obj.get("track_number", 1) # Default to 1 if missing track_number = track_obj.get("track_number", 1) # Default to 1 if missing
duration_ms = track_obj.get("duration_ms", 0) duration_ms = track_obj.get("duration_ms", 0)
# Extract artist names from artists array # Extract artist names from artists array
artists = track_obj.get("artists", []) artists = track_obj.get("artists", [])
artist_names = ", ".join([artist.get("name", "") for artist in artists if artist.get("name")]) artist_names = ", ".join(
[artist.get("name", "") for artist in artists if artist.get("name")]
)
if not artist_names: if not artist_names:
artist_names = "N/A" artist_names = "N/A"
# Extract album information # Extract album information
album_obj = track_obj.get("album", {}) album_obj = track_obj.get("album", {})
album_name = album_obj.get("title", "N/A") album_name = album_obj.get("title", "N/A")
# Extract album artist names from album artists array # Extract album artist names from album artists array
album_artists = album_obj.get("artists", []) album_artists = album_obj.get("artists", [])
album_artist_names = ", ".join([artist.get("name", "") for artist in album_artists if artist.get("name")]) album_artist_names = ", ".join(
[artist.get("name", "") for artist in album_artists if artist.get("name")]
)
if not album_artist_names: if not album_artist_names:
album_artist_names = "N/A" album_artist_names = "N/A"
logger.debug(f"Extracted metadata from deezspot callback for '{track_name}': track_number={track_number}") logger.debug(
f"Extracted metadata from deezspot callback for '{track_name}': track_number={track_number}"
)
except Exception as e: except Exception as e:
logger.error(f"Error extracting metadata from task {task_id} callback: {e}", exc_info=True) logger.error(
f"Error extracting metadata from task {task_id} callback: {e}",
exc_info=True,
)
return return
current_time = int(time.time()) current_time = int(time.time())
# Get spotify_track_id and added_at from original track_item_for_db # Get spotify_track_id and added_at from original track_item_for_db
track_id = track_item_for_db["track"]["id"] track_id = track_item_for_db["track"]["id"]
added_at = track_item_for_db.get("added_at") added_at = track_item_for_db.get("added_at")
album_id = track_item_for_db.get("track", {}).get("album", {}).get("id") # Only album ID from original data album_id = (
track_item_for_db.get("track", {}).get("album", {}).get("id")
logger.info(f"Adding track '{track_name}' (ID: {track_id}) to playlist {playlist_spotify_id} with track_number: {track_number} (from deezspot callback)") ) # Only album ID from original data
logger.info(
f"Adding track '{track_name}' (ID: {track_id}) to playlist {playlist_spotify_id} with track_number: {track_number} (from deezspot callback)"
)
track_data_tuple = ( track_data_tuple = (
track_id, track_id,
track_name, track_name,

View File

@@ -28,7 +28,6 @@ from routes.utils.get_info import (
get_spotify_info, get_spotify_info,
get_playlist_metadata, get_playlist_metadata,
get_playlist_tracks, get_playlist_tracks,
check_playlist_updated,
) # To fetch playlist, track, artist, and album details ) # To fetch playlist, track, artist, and album details
from routes.utils.celery_queue_manager import download_queue_manager from routes.utils.celery_queue_manager import download_queue_manager
@@ -38,12 +37,12 @@ STOP_EVENT = threading.Event()
# Format mapping for audio file conversions # Format mapping for audio file conversions
AUDIO_FORMAT_EXTENSIONS = { AUDIO_FORMAT_EXTENSIONS = {
'mp3': '.mp3', "mp3": ".mp3",
'flac': '.flac', "flac": ".flac",
'm4a': '.m4a', "m4a": ".m4a",
'aac': '.m4a', "aac": ".m4a",
'ogg': '.ogg', "ogg": ".ogg",
'wav': '.wav', "wav": ".wav",
} }
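A small usage sketch for the mapping; note that both "aac" and "m4a" deliberately resolve to the same container extension:

def expected_extension(convert_to: str | None) -> str | None:
    # Map a configured conversion target to its on-disk extension, if known.
    if not convert_to:
        return None
    return AUDIO_FORMAT_EXTENSIONS.get(convert_to.lower())

assert expected_extension("AAC") == ".m4a"
assert expected_extension("flac") == ".flac"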
DEFAULT_WATCH_CONFIG = { DEFAULT_WATCH_CONFIG = {
@@ -106,11 +105,11 @@ def has_playlist_changed(playlist_spotify_id: str, current_snapshot_id: str) ->
""" """
Check if a playlist has changed by comparing snapshot_id. Check if a playlist has changed by comparing snapshot_id.
This is much more efficient than fetching all tracks. This is much more efficient than fetching all tracks.
Args: Args:
playlist_spotify_id: The Spotify playlist ID playlist_spotify_id: The Spotify playlist ID
current_snapshot_id: The current snapshot_id from API current_snapshot_id: The current snapshot_id from API
Returns: Returns:
True if playlist has changed, False otherwise True if playlist has changed, False otherwise
""" """
@@ -119,29 +118,33 @@ def has_playlist_changed(playlist_spotify_id: str, current_snapshot_id: str) ->
if not db_playlist: if not db_playlist:
# Playlist not in database, consider it as "changed" to trigger initial processing # Playlist not in database, consider it as "changed" to trigger initial processing
return True return True
last_snapshot_id = db_playlist.get("snapshot_id") last_snapshot_id = db_playlist.get("snapshot_id")
if not last_snapshot_id: if not last_snapshot_id:
# No previous snapshot_id, consider it as "changed" to trigger initial processing # No previous snapshot_id, consider it as "changed" to trigger initial processing
return True return True
return current_snapshot_id != last_snapshot_id return current_snapshot_id != last_snapshot_id
except Exception as e: except Exception as e:
logger.error(f"Error checking playlist change status for {playlist_spotify_id}: {e}") logger.error(
f"Error checking playlist change status for {playlist_spotify_id}: {e}"
)
# On error, assume playlist has changed to be safe # On error, assume playlist has changed to be safe
return True return True
def needs_track_sync(playlist_spotify_id: str, current_snapshot_id: str, api_total_tracks: int) -> tuple[bool, list[str]]: def needs_track_sync(
playlist_spotify_id: str, current_snapshot_id: str, api_total_tracks: int
) -> tuple[bool, list[str]]:
""" """
Check if tracks need to be synchronized by comparing snapshot_ids and total counts. Check if tracks need to be synchronized by comparing snapshot_ids and total counts.
Args: Args:
playlist_spotify_id: The Spotify playlist ID playlist_spotify_id: The Spotify playlist ID
current_snapshot_id: The current snapshot_id from API current_snapshot_id: The current snapshot_id from API
api_total_tracks: The total number of tracks reported by API api_total_tracks: The total number of tracks reported by API
Returns: Returns:
Tuple of (needs_sync, tracks_to_find) where: Tuple of (needs_sync, tracks_to_find) where:
- needs_sync: True if tracks need to be synchronized - needs_sync: True if tracks need to be synchronized
@@ -151,7 +154,7 @@ def needs_track_sync(playlist_spotify_id: str, current_snapshot_id: str, api_tot
# Get tracks from database with their snapshot_ids # Get tracks from database with their snapshot_ids
db_tracks = get_playlist_tracks_with_snapshot_from_db(playlist_spotify_id) db_tracks = get_playlist_tracks_with_snapshot_from_db(playlist_spotify_id)
db_total_tracks = get_playlist_total_tracks_from_db(playlist_spotify_id) db_total_tracks = get_playlist_total_tracks_from_db(playlist_spotify_id)
# Check if total count matches # Check if total count matches
if db_total_tracks != api_total_tracks: if db_total_tracks != api_total_tracks:
logger.info( logger.info(
@@ -160,39 +163,41 @@ def needs_track_sync(playlist_spotify_id: str, current_snapshot_id: str, api_tot
# Always do full sync when counts don't match to ensure we don't miss any tracks # Always do full sync when counts don't match to ensure we don't miss any tracks
# This handles cases like: # This handles cases like:
# - Empty database (DB=0, API=1345) # - Empty database (DB=0, API=1345)
# - Missing tracks (DB=1000, API=1345) # - Missing tracks (DB=1000, API=1345)
# - Removed tracks (DB=1345, API=1000) # - Removed tracks (DB=1345, API=1000)
return True, [] # Empty list indicates full sync needed return True, [] # Empty list indicates full sync needed
# Check if any tracks have different snapshot_id # Check if any tracks have different snapshot_id
tracks_to_find = [] tracks_to_find = []
for track_id, track_data in db_tracks.items(): for track_id, track_data in db_tracks.items():
if track_data.get("snapshot_id") != current_snapshot_id: if track_data.get("snapshot_id") != current_snapshot_id:
tracks_to_find.append(track_id) tracks_to_find.append(track_id)
if tracks_to_find: if tracks_to_find:
logger.info( logger.info(
f"Found {len(tracks_to_find)} tracks with outdated snapshot_id for playlist {playlist_spotify_id}" f"Found {len(tracks_to_find)} tracks with outdated snapshot_id for playlist {playlist_spotify_id}"
) )
return True, tracks_to_find return True, tracks_to_find
return False, [] return False, []
except Exception as e: except Exception as e:
logger.error(f"Error checking track sync status for {playlist_spotify_id}: {e}") logger.error(f"Error checking track sync status for {playlist_spotify_id}: {e}")
# On error, assume sync is needed to be safe # On error, assume sync is needed to be safe
return True, [] return True, []
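The return contract is worth spelling out: (False, []) means nothing to do, (True, []) signals a full resync, and (True, [ids]) requests a targeted search. A hypothetical caller, with placeholder API values:

api_snapshot_id, api_total = "snap_xyz", 120  # placeholders for values from the API
needs_sync, stale_ids = needs_track_sync("PLAYLIST_ID", api_snapshot_id, api_total)
if not needs_sync:
    print("up to date")                     # (False, []) -> skip this playlist
elif not stale_ids:
    print("full resync of every page")      # (True, [])  -> counts diverged
else:
    print(f"targeted search for {len(stale_ids)} tracks")  # feed find_tracks_in_playlist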
def find_tracks_in_playlist(playlist_spotify_id: str, tracks_to_find: list[str], current_snapshot_id: str) -> tuple[list, list]: def find_tracks_in_playlist(
playlist_spotify_id: str, tracks_to_find: list[str], current_snapshot_id: str
) -> tuple[list, list]:
""" """
Progressively fetch playlist tracks until all specified tracks are found or playlist is exhausted. Progressively fetch playlist tracks until all specified tracks are found or playlist is exhausted.
Args: Args:
playlist_spotify_id: The Spotify playlist ID playlist_spotify_id: The Spotify playlist ID
tracks_to_find: List of track IDs to find tracks_to_find: List of track IDs to find
current_snapshot_id: The current snapshot_id current_snapshot_id: The current snapshot_id
Returns: Returns:
Tuple of (found_tracks, not_found_tracks) where: Tuple of (found_tracks, not_found_tracks) where:
- found_tracks: List of track items that were found - found_tracks: List of track items that were found
@@ -202,24 +207,28 @@ def find_tracks_in_playlist(playlist_spotify_id: str, tracks_to_find: list[str],
not_found_tracks = tracks_to_find.copy() not_found_tracks = tracks_to_find.copy()
offset = 0 offset = 0
limit = 100 limit = 100
logger.info( logger.info(
f"Searching for {len(tracks_to_find)} tracks in playlist {playlist_spotify_id} starting from offset {offset}" f"Searching for {len(tracks_to_find)} tracks in playlist {playlist_spotify_id} starting from offset {offset}"
) )
while not_found_tracks and offset < 10000: # Safety limit while not_found_tracks and offset < 10000: # Safety limit
try: try:
tracks_batch = get_playlist_tracks(playlist_spotify_id, limit=limit, offset=offset) tracks_batch = get_playlist_tracks(
playlist_spotify_id, limit=limit, offset=offset
)
if not tracks_batch or "items" not in tracks_batch: if not tracks_batch or "items" not in tracks_batch:
logger.warning(f"No tracks returned for playlist {playlist_spotify_id} at offset {offset}") logger.warning(
f"No tracks returned for playlist {playlist_spotify_id} at offset {offset}"
)
break break
batch_items = tracks_batch.get("items", []) batch_items = tracks_batch.get("items", [])
if not batch_items: if not batch_items:
logger.info(f"No more tracks found at offset {offset}") logger.info(f"No more tracks found at offset {offset}")
break break
# Check each track in this batch # Check each track in this batch
for track_item in batch_items: for track_item in batch_items:
track = track_item.get("track") track = track_item.get("track")
@@ -229,22 +238,24 @@ def find_tracks_in_playlist(playlist_spotify_id: str, tracks_to_find: list[str],
found_tracks.append(track_item) found_tracks.append(track_item)
not_found_tracks.remove(track_id) not_found_tracks.remove(track_id)
logger.debug(f"Found track {track_id} at offset {offset}") logger.debug(f"Found track {track_id} at offset {offset}")
offset += len(batch_items) offset += len(batch_items)
# Add small delay between batches # Add small delay between batches
time.sleep(0.1) time.sleep(0.1)
except Exception as e: except Exception as e:
logger.error(f"Error fetching tracks batch for playlist {playlist_spotify_id} at offset {offset}: {e}") logger.error(
f"Error fetching tracks batch for playlist {playlist_spotify_id} at offset {offset}: {e}"
)
break break
logger.info( logger.info(
f"Track search complete for playlist {playlist_spotify_id}: " f"Track search complete for playlist {playlist_spotify_id}: "
f"Found {len(found_tracks)}/{len(tracks_to_find)} tracks, " f"Found {len(found_tracks)}/{len(tracks_to_find)} tracks, "
f"Not found: {len(not_found_tracks)}" f"Not found: {len(not_found_tracks)}"
) )
return found_tracks, not_found_tracks return found_tracks, not_found_tracks
@@ -283,7 +294,7 @@ def check_watched_playlists(specific_playlist_id: str = None):
try: try:
# Ensure the playlist's track table has the latest schema before processing # Ensure the playlist's track table has the latest schema before processing
ensure_playlist_table_schema(playlist_spotify_id) ensure_playlist_table_schema(playlist_spotify_id)
# First, get playlist metadata to check if it has changed # First, get playlist metadata to check if it has changed
current_playlist_metadata = get_playlist_metadata(playlist_spotify_id) current_playlist_metadata = get_playlist_metadata(playlist_spotify_id)
if not current_playlist_metadata: if not current_playlist_metadata:
@@ -293,17 +304,23 @@ def check_watched_playlists(specific_playlist_id: str = None):
continue continue
api_snapshot_id = current_playlist_metadata.get("snapshot_id") api_snapshot_id = current_playlist_metadata.get("snapshot_id")
api_total_tracks = current_playlist_metadata.get("tracks", {}).get("total", 0) api_total_tracks = current_playlist_metadata.get("tracks", {}).get(
"total", 0
)
# Enhanced snapshot_id checking with track-level tracking # Enhanced snapshot_id checking with track-level tracking
if use_snapshot_checking: if use_snapshot_checking:
# First check if playlist snapshot_id has changed # First check if playlist snapshot_id has changed
playlist_changed = has_playlist_changed(playlist_spotify_id, api_snapshot_id) playlist_changed = has_playlist_changed(
playlist_spotify_id, api_snapshot_id
)
if not playlist_changed: if not playlist_changed:
# Even if playlist snapshot_id hasn't changed, check if individual tracks need sync # Even if playlist snapshot_id hasn't changed, check if individual tracks need sync
needs_sync, tracks_to_find = needs_track_sync(playlist_spotify_id, api_snapshot_id, api_total_tracks) needs_sync, tracks_to_find = needs_track_sync(
playlist_spotify_id, api_snapshot_id, api_total_tracks
)
if not needs_sync: if not needs_sync:
logger.info( logger.info(
f"Playlist Watch Manager: Playlist '{playlist_name}' ({playlist_spotify_id}) has not changed since last check (snapshot_id: {api_snapshot_id}). Skipping detailed check." f"Playlist Watch Manager: Playlist '{playlist_name}' ({playlist_spotify_id}) has not changed since last check (snapshot_id: {api_snapshot_id}). Skipping detailed check."
@@ -321,19 +338,25 @@ def check_watched_playlists(specific_playlist_id: str = None):
f"Playlist Watch Manager: Playlist '{playlist_name}' snapshot_id unchanged, but {len(tracks_to_find)} tracks need sync. Proceeding with targeted check." f"Playlist Watch Manager: Playlist '{playlist_name}' snapshot_id unchanged, but {len(tracks_to_find)} tracks need sync. Proceeding with targeted check."
) )
# Use targeted track search instead of full fetch # Use targeted track search instead of full fetch
found_tracks, not_found_tracks = find_tracks_in_playlist(playlist_spotify_id, tracks_to_find, api_snapshot_id) found_tracks, not_found_tracks = find_tracks_in_playlist(
playlist_spotify_id, tracks_to_find, api_snapshot_id
)
# Update found tracks with new snapshot_id # Update found tracks with new snapshot_id
if found_tracks: if found_tracks:
add_tracks_to_playlist_db(playlist_spotify_id, found_tracks, api_snapshot_id) add_tracks_to_playlist_db(
playlist_spotify_id, found_tracks, api_snapshot_id
)
# Mark not found tracks as removed # Mark not found tracks as removed
if not_found_tracks: if not_found_tracks:
logger.info( logger.info(
f"Playlist Watch Manager: {len(not_found_tracks)} tracks not found in playlist '{playlist_name}'. Marking as removed." f"Playlist Watch Manager: {len(not_found_tracks)} tracks not found in playlist '{playlist_name}'. Marking as removed."
) )
mark_tracks_as_not_present_in_spotify(playlist_spotify_id, not_found_tracks) mark_tracks_as_not_present_in_spotify(
playlist_spotify_id, not_found_tracks
)
# Update the playlist's m3u file after tracks are removed # Update the playlist's m3u file after tracks are removed
try: try:
logger.info( logger.info(
@@ -347,7 +370,9 @@ def check_watched_playlists(specific_playlist_id: str = None):
) )
# Update playlist snapshot and continue to next playlist # Update playlist snapshot and continue to next playlist
update_playlist_snapshot(playlist_spotify_id, api_snapshot_id, api_total_tracks) update_playlist_snapshot(
playlist_spotify_id, api_snapshot_id, api_total_tracks
)
logger.info( logger.info(
f"Playlist Watch Manager: Finished targeted sync for playlist '{playlist_name}'. Snapshot ID updated to {api_snapshot_id}." f"Playlist Watch Manager: Finished targeted sync for playlist '{playlist_name}'. Snapshot ID updated to {api_snapshot_id}."
) )
@@ -369,18 +394,18 @@ def check_watched_playlists(specific_playlist_id: str = None):
logger.info( logger.info(
f"Playlist Watch Manager: Fetching all tracks for playlist '{playlist_name}' ({playlist_spotify_id}) with {api_total_tracks} total tracks." f"Playlist Watch Manager: Fetching all tracks for playlist '{playlist_name}' ({playlist_spotify_id}) with {api_total_tracks} total tracks."
) )
all_api_track_items = [] all_api_track_items = []
offset = 0 offset = 0
limit = 100 # Use maximum batch size for efficiency limit = 100 # Use maximum batch size for efficiency
while offset < api_total_tracks: while offset < api_total_tracks:
try: try:
# Use the optimized get_playlist_tracks function # Use the optimized get_playlist_tracks function
tracks_batch = get_playlist_tracks( tracks_batch = get_playlist_tracks(
playlist_spotify_id, limit=limit, offset=offset playlist_spotify_id, limit=limit, offset=offset
) )
if not tracks_batch or "items" not in tracks_batch: if not tracks_batch or "items" not in tracks_batch:
logger.warning( logger.warning(
f"Playlist Watch Manager: No tracks returned for playlist {playlist_spotify_id} at offset {offset}" f"Playlist Watch Manager: No tracks returned for playlist {playlist_spotify_id} at offset {offset}"
@@ -390,14 +415,14 @@ def check_watched_playlists(specific_playlist_id: str = None):
batch_items = tracks_batch.get("items", []) batch_items = tracks_batch.get("items", [])
if not batch_items: if not batch_items:
break break
all_api_track_items.extend(batch_items) all_api_track_items.extend(batch_items)
offset += len(batch_items) offset += len(batch_items)
# Add small delay between batches to be respectful to API # Add small delay between batches to be respectful to API
if offset < api_total_tracks: if offset < api_total_tracks:
time.sleep(0.1) time.sleep(0.1)
except Exception as e: except Exception as e:
logger.error( logger.error(
f"Playlist Watch Manager: Error fetching tracks batch for playlist {playlist_spotify_id} at offset {offset}: {e}" f"Playlist Watch Manager: Error fetching tracks batch for playlist {playlist_spotify_id} at offset {offset}: {e}"
@@ -482,7 +507,9 @@ def check_watched_playlists(specific_playlist_id: str = None):
         logger.info(
             f"Playlist Watch Manager: Refreshing {len(all_api_track_items)} tracks from API in local DB for playlist '{playlist_name}'."
         )
-        add_tracks_to_playlist_db(playlist_spotify_id, all_api_track_items, api_snapshot_id)
+        add_tracks_to_playlist_db(
+            playlist_spotify_id, all_api_track_items, api_snapshot_id
+        )
         removed_db_ids = db_track_ids - current_api_track_ids
         if removed_db_ids:
@@ -504,7 +531,7 @@ def check_watched_playlists(specific_playlist_id: str = None):
             logger.error(
                 f"Failed to update m3u file for playlist '{playlist_name}' after playlist changes: {m3u_update_err}",
                 exc_info=True,
             )
         update_playlist_snapshot(
             playlist_spotify_id, api_snapshot_id, api_total_tracks
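The `removed_db_ids = db_track_ids - current_api_track_ids` diffing a few lines up is ordinary set arithmetic over track IDs; the mirror-image difference yields additions. Illustrated with made-up IDs:

```python
db_track_ids = {"track_a", "track_b", "track_c"}           # what the local DB knows
current_api_track_ids = {"track_b", "track_c", "track_d"}  # what Spotify returned

removed_ids = db_track_ids - current_api_track_ids    # {"track_a"}: gone from Spotify
added_ids = current_api_track_ids - db_track_ids      # {"track_d"}: new on Spotify
unchanged_ids = db_track_ids & current_api_track_ids  # {"track_b", "track_c"}
```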
@@ -564,11 +591,11 @@ def check_watched_artists(specific_artist_id: str = None):
     all_artist_albums_from_api: List[Dict[str, Any]] = []
     offset = 0
     limit = 50  # Spotify API limit for artist albums
     logger.info(
         f"Artist Watch Manager: Fetching albums for artist '{artist_name}' ({artist_spotify_id})"
     )
     while True:
         logger.debug(
             f"Artist Watch Manager: Fetching albums for {artist_spotify_id}. Limit: {limit}, Offset: {offset}"
@@ -810,13 +837,18 @@ def start_watch_manager(): # Renamed from start_playlist_watch_manager
     init_playlists_db()  # For playlists
     init_artists_db()  # For artists
     # Update all existing tables to ensure they have the latest schema
     try:
         update_all_existing_tables_schema()
-        logger.info("Watch Manager: Successfully updated all existing tables schema")
+        logger.info(
+            "Watch Manager: Successfully updated all existing tables schema"
+        )
     except Exception as e:
-        logger.error(f"Watch Manager: Error updating existing tables schema: {e}", exc_info=True)
+        logger.error(
+            f"Watch Manager: Error updating existing tables schema: {e}",
+            exc_info=True,
+        )
     _watch_scheduler_thread = threading.Thread(
         target=playlist_watch_scheduler, daemon=True
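The `update_all_existing_tables_schema()` call guarded above follows the usual additive-SQLite-migration shape: read `PRAGMA table_info`, then `ALTER TABLE ... ADD COLUMN` whatever is missing. A minimal sketch of that idea — the column map here is illustrative, not the project's actual `EXPECTED_PLAYLIST_TRACKS_COLUMNS`:

```python
import sqlite3

EXPECTED_COLUMNS = {
    # column name -> SQL type/default used when back-filling old tables
    "spotify_track_id": "TEXT",
    "is_present_in_spotify": "INTEGER DEFAULT 1",
    "duration_ms": "INTEGER DEFAULT 0",
}

def ensure_table_schema(conn: sqlite3.Connection, table_name: str) -> None:
    cursor = conn.cursor()
    cursor.execute(f"PRAGMA table_info({table_name})")
    existing = {row[1] for row in cursor.fetchall()}  # row[1] is the column name
    for column, ddl in EXPECTED_COLUMNS.items():
        if column not in existing:
            # SQLite supports additive ALTERs, so this is safe to re-run.
            cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN {column} {ddl}")
    conn.commit()
```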
@@ -847,22 +879,26 @@ def stop_watch_manager(): # Renamed from stop_playlist_watch_manager
 def get_playlist_tracks_for_m3u(playlist_spotify_id: str) -> List[Dict[str, Any]]:
     """
     Get all tracks for a playlist from the database with complete metadata needed for m3u generation.
     Args:
         playlist_spotify_id: The Spotify playlist ID
     Returns:
         List of track dictionaries with metadata
     """
     table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
-    tracks = []
+    tracks: List[Dict[str, Any]] = []
     try:
-        from routes.utils.watch.db import _get_playlists_db_connection, _ensure_table_schema, EXPECTED_PLAYLIST_TRACKS_COLUMNS
+        from routes.utils.watch.db import (
+            _get_playlists_db_connection,
+            _ensure_table_schema,
+            EXPECTED_PLAYLIST_TRACKS_COLUMNS,
+        )
         with _get_playlists_db_connection() as conn:
             cursor = conn.cursor()
             # Check if table exists
             cursor.execute(
                 f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';"
@@ -872,7 +908,7 @@ def get_playlist_tracks_for_m3u(playlist_spotify_id: str) -> List[Dict[str, Any]
f"Track table {table_name} does not exist. Cannot generate m3u file." f"Track table {table_name} does not exist. Cannot generate m3u file."
) )
return tracks return tracks
# Ensure the table has the latest schema before querying # Ensure the table has the latest schema before querying
_ensure_table_schema( _ensure_table_schema(
cursor, cursor,
@@ -880,30 +916,33 @@ def get_playlist_tracks_for_m3u(playlist_spotify_id: str) -> List[Dict[str, Any]
                 EXPECTED_PLAYLIST_TRACKS_COLUMNS,
                 f"playlist tracks ({playlist_spotify_id})",
             )
             # Get all tracks that are present in Spotify
             cursor.execute(f"""
                 SELECT spotify_track_id, title, artist_names, album_name,
                        album_artist_names, track_number, duration_ms
                 FROM {table_name}
                 WHERE is_present_in_spotify = 1
                 ORDER BY track_number, title
             """)
             rows = cursor.fetchall()
             for row in rows:
-                tracks.append({
-                    "spotify_track_id": row["spotify_track_id"],
-                    "title": row["title"] or "Unknown Track",
-                    "artist_names": row["artist_names"] or "Unknown Artist",
-                    "album_name": row["album_name"] or "Unknown Album",
-                    "album_artist_names": row["album_artist_names"] or "Unknown Artist",
-                    "track_number": row["track_number"] or 0,
-                    "duration_ms": row["duration_ms"] or 0,
-                })
+                tracks.append(
+                    {
+                        "spotify_track_id": row["spotify_track_id"],
+                        "title": row["title"] or "Unknown Track",
+                        "artist_names": row["artist_names"] or "Unknown Artist",
+                        "album_name": row["album_name"] or "Unknown Album",
+                        "album_artist_names": row["album_artist_names"]
+                        or "Unknown Artist",
+                        "track_number": row["track_number"] or 0,
+                        "duration_ms": row["duration_ms"] or 0,
+                    }
+                )
         return tracks
     except Exception as e:
         logger.error(
             f"Error retrieving tracks for m3u generation for playlist {playlist_spotify_id}: {e}",
@@ -912,17 +951,22 @@ def get_playlist_tracks_for_m3u(playlist_spotify_id: str) -> List[Dict[str, Any]
         return tracks
-def generate_track_file_path(track: Dict[str, Any], custom_dir_format: str, custom_track_format: str, convert_to: str = None) -> str:
+def generate_track_file_path(
+    track: Dict[str, Any],
+    custom_dir_format: str,
+    custom_track_format: str,
+    convert_to: str = None,
+) -> str:
     """
     Generate the file path for a track based on custom format strings.
     This mimics the path generation logic used by the deezspot library.
     Args:
         track: Track metadata dictionary
         custom_dir_format: Directory format string (e.g., "%ar_album%/%album%")
         custom_track_format: Track format string (e.g., "%tracknum%. %music% - %artist%")
         convert_to: Target conversion format (e.g., "mp3", "flac", "m4a")
     Returns:
         Generated file path relative to output directory
     """
@@ -934,23 +978,25 @@ def generate_track_file_path(track: Dict[str, Any], custom_dir_format: str, cust
         title = track.get("title", "Unknown Track")
         track_number = track.get("track_number", 0)
         duration_ms = track.get("duration_ms", 0)
         # Use album artist for directory structure, main artist for track name
         main_artist = artist_names.split(", ")[0] if artist_names else "Unknown Artist"
-        album_artist = album_artist_names.split(", ")[0] if album_artist_names else main_artist
+        album_artist = (
+            album_artist_names.split(", ")[0] if album_artist_names else main_artist
+        )
         # Clean names for filesystem
         def clean_name(name):
             # Remove or replace characters that are problematic in filenames
-            name = re.sub(r'[<>:"/\\|?*]', '_', str(name))
-            name = re.sub(r'[\x00-\x1f]', '', name)  # Remove control characters
+            name = re.sub(r'[<>:"/\\|?*]', "_", str(name))
+            name = re.sub(r"[\x00-\x1f]", "", name)  # Remove control characters
             return name.strip()
         clean_album_artist = clean_name(album_artist)
         clean_album = clean_name(album_name)
         clean_main_artist = clean_name(main_artist)
         clean_title = clean_name(title)
         # Prepare placeholder replacements
         replacements = {
             # Common placeholders
@@ -960,58 +1006,66 @@ def generate_track_file_path(track: Dict[str, Any], custom_dir_format: str, cust
"%ar_album%": clean_album_artist, "%ar_album%": clean_album_artist,
"%tracknum%": f"{track_number:02d}" if track_number > 0 else "00", "%tracknum%": f"{track_number:02d}" if track_number > 0 else "00",
"%year%": "", # Not available in current DB schema "%year%": "", # Not available in current DB schema
# Additional placeholders (not available in current DB schema, using defaults) # Additional placeholders (not available in current DB schema, using defaults)
"%discnum%": "01", # Default to disc 1 "%discnum%": "01", # Default to disc 1
"%date%": "", # Not available "%date%": "", # Not available
"%genre%": "", # Not available "%genre%": "", # Not available
"%isrc%": "", # Not available "%isrc%": "", # Not available
"%explicit%": "", # Not available "%explicit%": "", # Not available
"%duration%": str(duration_ms // 1000) if duration_ms > 0 else "0", # Convert ms to seconds "%duration%": str(duration_ms // 1000)
if duration_ms > 0
else "0", # Convert ms to seconds
} }
# Apply replacements to directory format # Apply replacements to directory format
dir_path = custom_dir_format dir_path = custom_dir_format
for placeholder, value in replacements.items(): for placeholder, value in replacements.items():
dir_path = dir_path.replace(placeholder, value) dir_path = dir_path.replace(placeholder, value)
# Apply replacements to track format # Apply replacements to track format
track_filename = custom_track_format track_filename = custom_track_format
for placeholder, value in replacements.items(): for placeholder, value in replacements.items():
track_filename = track_filename.replace(placeholder, value) track_filename = track_filename.replace(placeholder, value)
# Combine and clean up path # Combine and clean up path
full_path = os.path.join(dir_path, track_filename) full_path = os.path.join(dir_path, track_filename)
full_path = os.path.normpath(full_path) full_path = os.path.normpath(full_path)
# Determine file extension based on convert_to setting or default to mp3 # Determine file extension based on convert_to setting or default to mp3
if not any(full_path.lower().endswith(ext) for ext in ['.mp3', '.flac', '.m4a', '.ogg', '.wav']): if not any(
full_path.lower().endswith(ext)
for ext in [".mp3", ".flac", ".m4a", ".ogg", ".wav"]
):
if convert_to: if convert_to:
extension = AUDIO_FORMAT_EXTENSIONS.get(convert_to.lower(), '.mp3') extension = AUDIO_FORMAT_EXTENSIONS.get(convert_to.lower(), ".mp3")
full_path += extension full_path += extension
else: else:
full_path += '.mp3' # Default fallback full_path += ".mp3" # Default fallback
return full_path return full_path
except Exception as e: except Exception as e:
logger.error(f"Error generating file path for track {track.get('title', 'Unknown')}: {e}") logger.error(
f"Error generating file path for track {track.get('title', 'Unknown')}: {e}"
)
# Return a fallback path with appropriate extension # Return a fallback path with appropriate extension
safe_title = re.sub(r'[<>:"/\\|?*\x00-\x1f]', '_', str(track.get('title', 'Unknown Track'))) safe_title = re.sub(
r'[<>:"/\\|?*\x00-\x1f]', "_", str(track.get("title", "Unknown Track"))
)
# Determine extension for fallback # Determine extension for fallback
if convert_to: if convert_to:
extension = AUDIO_FORMAT_EXTENSIONS.get(convert_to.lower(), '.mp3') extension = AUDIO_FORMAT_EXTENSIONS.get(convert_to.lower(), ".mp3")
else: else:
extension = '.mp3' extension = ".mp3"
return f"Unknown Artist/Unknown Album/{safe_title}{extension}" return f"Unknown Artist/Unknown Album/{safe_title}{extension}"
 def update_playlist_m3u_file(playlist_spotify_id: str):
     """
     Generate/update the m3u file for a watched playlist based on tracks in the database.
     Args:
         playlist_spotify_id: The Spotify playlist ID
     """
@@ -1019,62 +1073,79 @@ def update_playlist_m3u_file(playlist_spotify_id: str):
         # Get playlist metadata
         playlist_info = get_watched_playlist(playlist_spotify_id)
         if not playlist_info:
-            logger.warning(f"Playlist {playlist_spotify_id} not found in watched playlists. Cannot update m3u file.")
+            logger.warning(
+                f"Playlist {playlist_spotify_id} not found in watched playlists. Cannot update m3u file."
+            )
             return
         playlist_name = playlist_info.get("name", "Unknown Playlist")
         # Get configuration settings
         from routes.utils.celery_config import get_config_params
         config = get_config_params()
         custom_dir_format = config.get("customDirFormat", "%ar_album%/%album%")
         custom_track_format = config.get("customTrackFormat", "%tracknum%. %music%")
         convert_to = config.get("convertTo")  # Get conversion format setting
-        output_dir = "./downloads"  # This matches the output_dir used in download functions
+        output_dir = (
+            "./downloads"  # This matches the output_dir used in download functions
+        )
         # Get all tracks for the playlist
         tracks = get_playlist_tracks_for_m3u(playlist_spotify_id)
         if not tracks:
-            logger.info(f"No tracks found for playlist '{playlist_name}'. M3U file will be empty or removed.")
+            logger.info(
+                f"No tracks found for playlist '{playlist_name}'. M3U file will be empty or removed."
+            )
         # Clean playlist name for filename
-        safe_playlist_name = re.sub(r'[<>:"/\\|?*\x00-\x1f]', '_', playlist_name).strip()
+        safe_playlist_name = re.sub(
+            r'[<>:"/\\|?*\x00-\x1f]', "_", playlist_name
+        ).strip()
         # Create m3u file path
         playlists_dir = Path(output_dir) / "playlists"
         playlists_dir.mkdir(parents=True, exist_ok=True)
         m3u_file_path = playlists_dir / f"{safe_playlist_name}.m3u"
         # Generate m3u content
         m3u_lines = ["#EXTM3U"]
         for track in tracks:
             # Generate file path for this track
-            track_file_path = generate_track_file_path(track, custom_dir_format, custom_track_format, convert_to)
+            track_file_path = generate_track_file_path(
+                track, custom_dir_format, custom_track_format, convert_to
+            )
             # Create relative path from m3u file location to track file
             # M3U file is in ./downloads/playlists/
             # Track files are in ./downloads/{custom_dir_format}/
             relative_path = os.path.join("..", track_file_path)
-            relative_path = relative_path.replace("\\", "/")  # Use forward slashes for m3u compatibility
+            relative_path = relative_path.replace(
+                "\\", "/"
+            )  # Use forward slashes for m3u compatibility
             # Add EXTINF line with track duration and title
-            duration_seconds = (track.get("duration_ms", 0) // 1000) if track.get("duration_ms") else -1
+            duration_seconds = (
+                (track.get("duration_ms", 0) // 1000)
+                if track.get("duration_ms")
+                else -1
+            )
             artist_and_title = f"{track.get('artist_names', 'Unknown Artist')} - {track.get('title', 'Unknown Track')}"
             m3u_lines.append(f"#EXTINF:{duration_seconds},{artist_and_title}")
             m3u_lines.append(relative_path)
         # Write m3u file
-        with open(m3u_file_path, 'w', encoding='utf-8') as f:
-            f.write('\n'.join(m3u_lines))
+        with open(m3u_file_path, "w", encoding="utf-8") as f:
+            f.write("\n".join(m3u_lines))
         logger.info(
             f"Updated m3u file for playlist '{playlist_name}' at {m3u_file_path} with {len(tracks)} tracks{f' (format: {convert_to})' if convert_to else ''}."
         )
     except Exception as e:
         logger.error(
             f"Error updating m3u file for playlist {playlist_spotify_id}: {e}",

View File

@@ -1,7 +1,7 @@
 {
   "name": "spotizerr-ui",
   "private": true,
-  "version": "3.0.5",
+  "version": "3.0.6",
   "type": "module",
   "scripts": {
     "dev": "vite",

View File

@@ -21,6 +21,7 @@ interface DownloadSettings {
   hlsThreads: number;
   deezerQuality: "MP3_128" | "MP3_320" | "FLAC";
   spotifyQuality: "NORMAL" | "HIGH" | "VERY_HIGH";
+  recursiveQuality?: boolean; // frontend field (mapped to recursive_quality on save)
 }
 interface WatchConfig {
@@ -49,8 +50,14 @@ const CONVERSION_FORMATS: Record<string, string[]> = {
 };
 // --- API Functions ---
-const saveDownloadConfig = async (data: Partial<DownloadSettings>) => {
-  const { data: response } = await authApiClient.client.post("/config", data);
+const saveDownloadConfig = async (data: Partial<DownloadSettings> & { recursive_quality?: boolean }) => {
+  // Map camelCase to snake_case for backend compatibility
+  const payload: any = { ...data };
+  if (typeof data.recursiveQuality !== "undefined") {
+    payload.recursive_quality = data.recursiveQuality;
+    delete payload.recursiveQuality;
+  }
+  const { data: response } = await authApiClient.client.post("/config", payload);
   return response;
 };
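The field-by-field camelCase-to-snake_case bridging above (repeated for `artistSeparator` further down) can also be done generically with a key rename over the whole payload. A hedged sketch of that mapping in Python, as a backend-side normalization might apply it — the helpers are hypothetical and not part of this commit:

```python
import re

def camel_to_snake(key: str) -> str:
    # "recursiveQuality" -> "recursive_quality"; "artistSeparator" -> "artist_separator"
    return re.sub(r"(?<!^)(?=[A-Z])", "_", key).lower()

def normalize_config_payload(data: dict) -> dict:
    # Rename every camelCase key so the backend only ever sees snake_case.
    return {camel_to_snake(k): v for k, v in data.items()}

assert normalize_config_payload({"recursiveQuality": True}) == {"recursive_quality": True}
```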
@@ -189,6 +196,10 @@ export function DownloadsTab({ config, isLoading }: DownloadsTabProps) {
<label htmlFor="fallbackToggle" className="text-content-primary dark:text-content-primary-dark">Download Fallback</label> <label htmlFor="fallbackToggle" className="text-content-primary dark:text-content-primary-dark">Download Fallback</label>
<input id="fallbackToggle" type="checkbox" {...register("fallback")} className="h-6 w-6 rounded" /> <input id="fallbackToggle" type="checkbox" {...register("fallback")} className="h-6 w-6 rounded" />
</div> </div>
<div className="flex items-center justify-between">
<label htmlFor="recursiveQualityToggle" className="text-content-primary dark:text-content-primary-dark">Recursive Quality</label>
<input id="recursiveQualityToggle" type="checkbox" {...register("recursiveQuality")} className="h-6 w-6 rounded" />
</div>
{/* Watch validation info */} {/* Watch validation info */}
{watchConfig?.enabled && ( {watchConfig?.enabled && (

View File

@@ -14,6 +14,7 @@ interface FormattingSettings {
   album: string;
   playlist: string;
   compilation: string;
+  artistSeparator: string;
 }
 interface FormattingTabProps {
@@ -23,7 +24,12 @@ interface FormattingTabProps {
 // --- API Functions ---
 const saveFormattingConfig = async (data: Partial<FormattingSettings>) => {
-  const { data: response } = await authApiClient.client.post("/config", data);
+  const payload: any = { ...data };
+  if (typeof data.artistSeparator !== "undefined") {
+    payload.artist_separator = data.artistSeparator;
+    delete payload.artistSeparator;
+  }
+  const { data: response } = await authApiClient.client.post("/config", payload);
   return response;
 };
@@ -160,6 +166,17 @@ export function FormattingTab({ config, isLoading }: FormattingTabProps) {
className="h-6 w-6 rounded" className="h-6 w-6 rounded"
/> />
</div> </div>
<div className="flex items-center justify-between">
<label htmlFor="artistSeparator" className="text-content-primary dark:text-content-primary-dark">Artist Separator</label>
<input
id="artistSeparator"
type="text"
maxLength={8}
placeholder="; "
{...register("artistSeparator")}
className="block w-full p-2 border bg-input-background dark:bg-input-background-dark border-input-border dark:border-input-border-dark rounded-md focus:outline-none focus:ring-2 focus:ring-input-focus"
/>
</div>
<div className="flex items-center justify-between"> <div className="flex items-center justify-between">
<label htmlFor="saveCoverToggle" className="text-content-primary dark:text-content-primary-dark">Save Album Cover</label> <label htmlFor="saveCoverToggle" className="text-content-primary dark:text-content-primary-dark">Save Album Cover</label>
<input id="saveCoverToggle" type="checkbox" {...register("saveCover")} className="h-6 w-6 rounded" /> <input id="saveCoverToggle" type="checkbox" {...register("saveCover")} className="h-6 w-6 rounded" />

View File

@@ -57,6 +57,7 @@ export type FlatAppSettings = {
   album: string;
   playlist: string;
   compilation: string;
+  artistSeparator: string;
 };
 const defaultSettings: FlatAppSettings = {
@@ -89,6 +90,7 @@ const defaultSettings: FlatAppSettings = {
album: "{artist_name}/{album_name}", album: "{artist_name}/{album_name}",
playlist: "Playlists/{playlist_name}", playlist: "Playlists/{playlist_name}",
compilation: "Compilations/{album_name}", compilation: "Compilations/{album_name}",
artistSeparator: "; ",
watch: { watch: {
enabled: false, enabled: false,
}, },

View File

@@ -31,6 +31,7 @@ export interface AppSettings {
   album: string;
   playlist: string;
   compilation: string;
+  artistSeparator: string;
   watch: {
     enabled: boolean;
     // Add other watch properties from the old type if they still exist in the API response

View File

@@ -29,6 +29,7 @@ export interface AppSettings {
   album: string;
   playlist: string;
   compilation: string;
+  artistSeparator: string;
   watch: {
     enabled: boolean;
     // Add other watch properties from the old type if they still exist in the API response