fix(ui): Queue and deezspot callbacks

Xoconoch
2025-08-23 23:00:11 -06:00
parent 453e9cd7fd
commit 0661865d16
21 changed files with 509 additions and 1660 deletions

View File

@@ -3,16 +3,11 @@ import sqlite3
from pathlib import Path
from typing import Optional
-from .v3_2_0 import MigrationV3_2_0
-from .v3_2_1 import log_noop_migration_detected
+from .v3_3_0 import MigrationV3_3_0
logger = logging.getLogger(__name__)
DATA_DIR = Path("./data")
-HISTORY_DB = DATA_DIR / "history" / "download_history.db"
-WATCH_DIR = DATA_DIR / "watch"
-PLAYLISTS_DB = WATCH_DIR / "playlists.db"
-ARTISTS_DB = WATCH_DIR / "artists.db"
# Credentials
CREDS_DIR = DATA_DIR / "creds"
@@ -20,89 +15,6 @@ ACCOUNTS_DB = CREDS_DIR / "accounts.db"
BLOBS_DIR = CREDS_DIR / "blobs"
SEARCH_JSON = CREDS_DIR / "search.json"
# Expected children table columns for history (album_/playlist_)
CHILDREN_EXPECTED_COLUMNS: dict[str, str] = {
"id": "INTEGER PRIMARY KEY AUTOINCREMENT",
"title": "TEXT NOT NULL",
"artists": "TEXT",
"album_title": "TEXT",
"duration_ms": "INTEGER",
"track_number": "INTEGER",
"disc_number": "INTEGER",
"explicit": "BOOLEAN",
"status": "TEXT NOT NULL",
"external_ids": "TEXT",
"genres": "TEXT",
"isrc": "TEXT",
"timestamp": "REAL NOT NULL",
"position": "INTEGER",
"metadata": "TEXT",
}
# 3.2.0 expected schemas for Watch DBs (kept here to avoid importing modules with side-effects)
EXPECTED_WATCHED_PLAYLISTS_COLUMNS: dict[str, str] = {
"spotify_id": "TEXT PRIMARY KEY",
"name": "TEXT",
"owner_id": "TEXT",
"owner_name": "TEXT",
"total_tracks": "INTEGER",
"link": "TEXT",
"snapshot_id": "TEXT",
"last_checked": "INTEGER",
"added_at": "INTEGER",
"is_active": "INTEGER DEFAULT 1",
}
EXPECTED_PLAYLIST_TRACKS_COLUMNS: dict[str, str] = {
"spotify_track_id": "TEXT PRIMARY KEY",
"title": "TEXT",
"artist_names": "TEXT",
"album_name": "TEXT",
"album_artist_names": "TEXT",
"track_number": "INTEGER",
"album_spotify_id": "TEXT",
"duration_ms": "INTEGER",
"added_at_playlist": "TEXT",
"added_to_db": "INTEGER",
"is_present_in_spotify": "INTEGER DEFAULT 1",
"last_seen_in_spotify": "INTEGER",
"snapshot_id": "TEXT",
"final_path": "TEXT",
}
EXPECTED_WATCHED_ARTISTS_COLUMNS: dict[str, str] = {
"spotify_id": "TEXT PRIMARY KEY",
"name": "TEXT",
"link": "TEXT",
"total_albums_on_spotify": "INTEGER",
"last_checked": "INTEGER",
"added_at": "INTEGER",
"is_active": "INTEGER DEFAULT 1",
"genres": "TEXT",
"popularity": "INTEGER",
"image_url": "TEXT",
}
EXPECTED_ARTIST_ALBUMS_COLUMNS: dict[str, str] = {
"album_spotify_id": "TEXT PRIMARY KEY",
"artist_spotify_id": "TEXT",
"name": "TEXT",
"album_group": "TEXT",
"album_type": "TEXT",
"release_date": "TEXT",
"release_date_precision": "TEXT",
"total_tracks": "INTEGER",
"link": "TEXT",
"image_url": "TEXT",
"added_to_db": "INTEGER",
"last_seen_on_spotify": "INTEGER",
"download_task_id": "TEXT",
"download_status": "INTEGER DEFAULT 0",
"is_fully_downloaded_managed_by_app": "INTEGER DEFAULT 0",
}
m320 = MigrationV3_2_0()
def _safe_connect(path: Path) -> Optional[sqlite3.Connection]:
    try:
@@ -115,245 +27,6 @@ def _safe_connect(path: Path) -> Optional[sqlite3.Connection]:
        return None
def _ensure_table_schema(
conn: sqlite3.Connection,
table_name: str,
expected_columns: dict[str, str],
table_description: str,
) -> None:
try:
cur = conn.execute(f"PRAGMA table_info({table_name})")
existing_info = cur.fetchall()
existing_names = {row[1] for row in existing_info}
for col_name, col_type in expected_columns.items():
if col_name in existing_names:
continue
col_type_for_add = (
col_type.replace("PRIMARY KEY", "")
.replace("AUTOINCREMENT", "")
.replace("NOT NULL", "")
.strip()
)
try:
conn.execute(
f"ALTER TABLE {table_name} ADD COLUMN {col_name} {col_type_for_add}"
)
logger.info(
f"Added missing column '{col_name} {col_type_for_add}' to {table_description} table '{table_name}'."
)
except sqlite3.OperationalError as e:
logger.warning(
f"Could not add column '{col_name}' to {table_description} table '{table_name}': {e}"
)
except Exception as e:
logger.error(
f"Error ensuring schema for {table_description} table '{table_name}': {e}",
exc_info=True,
)
def _create_or_update_children_table(conn: sqlite3.Connection, table_name: str) -> None:
conn.execute(
f"""
CREATE TABLE IF NOT EXISTS {table_name} (
id INTEGER PRIMARY KEY AUTOINCREMENT,
title TEXT NOT NULL,
artists TEXT,
album_title TEXT,
duration_ms INTEGER,
track_number INTEGER,
disc_number INTEGER,
explicit BOOLEAN,
status TEXT NOT NULL,
external_ids TEXT,
genres TEXT,
isrc TEXT,
timestamp REAL NOT NULL,
position INTEGER,
metadata TEXT
)
"""
)
_ensure_table_schema(
conn, table_name, CHILDREN_EXPECTED_COLUMNS, "children history"
)
# --- Helper to validate instance is at least 3.1.2 on history DB ---
def _history_children_tables(conn: sqlite3.Connection) -> list[str]:
tables: set[str] = set()
try:
cur = conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND (name LIKE 'album_%' OR name LIKE 'playlist_%') AND name != 'download_history'"
)
for row in cur.fetchall():
if row and row[0]:
tables.add(row[0])
except sqlite3.Error as e:
logger.warning(f"Failed to scan sqlite_master for children tables: {e}")
try:
cur = conn.execute(
"SELECT DISTINCT children_table FROM download_history WHERE children_table IS NOT NULL AND TRIM(children_table) != ''"
)
for row in cur.fetchall():
t = row[0]
if t:
tables.add(t)
except sqlite3.Error as e:
logger.warning(f"Failed to scan download_history for children tables: {e}")
return sorted(tables)
def _is_history_at_least_3_2_0(conn: sqlite3.Connection) -> bool:
required_cols = {"service", "quality_format", "quality_bitrate"}
tables = _history_children_tables(conn)
if not tables:
# Nothing to migrate implies OK
return True
for t in tables:
try:
cur = conn.execute(f"PRAGMA table_info({t})")
cols = {row[1] for row in cur.fetchall()}
if not required_cols.issubset(cols):
return False
except sqlite3.OperationalError:
return False
return True
# --- 3.2.0 verification helpers for Watch DBs ---
def _update_watch_playlists_db(conn: sqlite3.Connection) -> None:
try:
# Ensure core watched_playlists table exists and has expected schema
conn.execute(
"""
CREATE TABLE IF NOT EXISTS watched_playlists (
spotify_id TEXT PRIMARY KEY,
name TEXT,
owner_id TEXT,
owner_name TEXT,
total_tracks INTEGER,
link TEXT,
snapshot_id TEXT,
last_checked INTEGER,
added_at INTEGER,
is_active INTEGER DEFAULT 1
)
"""
)
_ensure_table_schema(
conn,
"watched_playlists",
EXPECTED_WATCHED_PLAYLISTS_COLUMNS,
"watched playlists",
)
# Upgrade all dynamic playlist_ tables
cur = conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'playlist_%'"
)
for row in cur.fetchall():
table_name = row[0]
conn.execute(
f"""
CREATE TABLE IF NOT EXISTS {table_name} (
spotify_track_id TEXT PRIMARY KEY,
title TEXT,
artist_names TEXT,
album_name TEXT,
album_artist_names TEXT,
track_number INTEGER,
album_spotify_id TEXT,
duration_ms INTEGER,
added_at_playlist TEXT,
added_to_db INTEGER,
is_present_in_spotify INTEGER DEFAULT 1,
last_seen_in_spotify INTEGER,
snapshot_id TEXT,
final_path TEXT
)
"""
)
_ensure_table_schema(
conn,
table_name,
EXPECTED_PLAYLIST_TRACKS_COLUMNS,
f"playlist tracks ({table_name})",
)
except Exception:
logger.error(
"Failed to upgrade watch playlists DB to 3.2.0 base schema", exc_info=True
)
def _update_watch_artists_db(conn: sqlite3.Connection) -> None:
try:
# Ensure core watched_artists table exists and has expected schema
conn.execute(
"""
CREATE TABLE IF NOT EXISTS watched_artists (
spotify_id TEXT PRIMARY KEY,
name TEXT,
link TEXT,
total_albums_on_spotify INTEGER,
last_checked INTEGER,
added_at INTEGER,
is_active INTEGER DEFAULT 1,
genres TEXT,
popularity INTEGER,
image_url TEXT
)
"""
)
_ensure_table_schema(
conn, "watched_artists", EXPECTED_WATCHED_ARTISTS_COLUMNS, "watched artists"
)
# Upgrade all dynamic artist_ tables
cur = conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'artist_%'"
)
for row in cur.fetchall():
table_name = row[0]
conn.execute(
f"""
CREATE TABLE IF NOT EXISTS {table_name} (
album_spotify_id TEXT PRIMARY KEY,
artist_spotify_id TEXT,
name TEXT,
album_group TEXT,
album_type TEXT,
release_date TEXT,
release_date_precision TEXT,
total_tracks INTEGER,
link TEXT,
image_url TEXT,
added_to_db INTEGER,
last_seen_on_spotify INTEGER,
download_task_id TEXT,
download_status INTEGER DEFAULT 0,
is_fully_downloaded_managed_by_app INTEGER DEFAULT 0
)
"""
)
_ensure_table_schema(
conn,
table_name,
EXPECTED_ARTIST_ALBUMS_COLUMNS,
f"artist albums ({table_name})",
)
except Exception:
logger.error(
"Failed to upgrade watch artists DB to 3.2.0 base schema", exc_info=True
)
def _ensure_creds_filesystem() -> None: def _ensure_creds_filesystem() -> None:
try: try:
BLOBS_DIR.mkdir(parents=True, exist_ok=True) BLOBS_DIR.mkdir(parents=True, exist_ok=True)
@@ -374,35 +47,10 @@ def run_migrations_if_needed():
        return
    try:
-       # Require instance to be at least 3.2.0 on history DB; otherwise abort
-       with _safe_connect(HISTORY_DB) as history_conn:
-           if history_conn and not _is_history_at_least_3_2_0(history_conn):
-               logger.error(
-                   "Instance is not at schema version 3.2.0. Please upgrade to 3.2.0 before applying 3.3.0."
-               )
-               raise RuntimeError(
-                   "Instance is not at schema version 3.2.0. Please upgrade to 3.2.0 before applying 3.3.0."
-               )
-       # Watch playlists DB
-       with _safe_connect(PLAYLISTS_DB) as conn:
-           if conn:
-               _update_watch_playlists_db(conn)
-               # Apply 3.2.0 additions (batch progress columns)
-               if not m320.check_watch_playlists(conn):
-                   m320.update_watch_playlists(conn)
-               conn.commit()
-       # Watch artists DB (if exists)
-       if ARTISTS_DB.exists():
-           with _safe_connect(ARTISTS_DB) as conn:
-               if conn:
-                   _update_watch_artists_db(conn)
-                   if not m320.check_watch_artists(conn):
-                       m320.update_watch_artists(conn)
-                   conn.commit()
-       # Accounts DB (no changes for this migration path)
+       # Validate configuration version strictly at 3.3.0
+       MigrationV3_3_0.assert_config_version_is_3_3_0()
+       # No schema changes in 3.3.0 path; just ensure Accounts DB can be opened
        with _safe_connect(ACCOUNTS_DB) as conn:
            if conn:
                conn.commit()
@@ -412,5 +60,4 @@ def run_migrations_if_needed():
        raise
    else:
        _ensure_creds_filesystem()
-       log_noop_migration_detected()
-       logger.info("Database migrations check completed (3.2.0 -> 3.3.0 path)")
+       logger.info("Migration validation completed (3.3.0 gate)")

View File

@@ -1,100 +0,0 @@
import sqlite3
import logging
logger = logging.getLogger(__name__)
class MigrationV3_2_0:
"""
Migration for version 3.2.0 (upgrade path 3.2.0 -> 3.3.0).
- Adds per-item batch progress columns to Watch DBs to support page-by-interval processing.
- Enforces prerequisite: previous instance version must be 3.1.2 (validated by runner).
"""
# New columns to add to watched tables
PLAYLISTS_ADDED_COLUMNS: dict[str, str] = {
"batch_next_offset": "INTEGER DEFAULT 0",
"batch_processing_snapshot_id": "TEXT",
}
ARTISTS_ADDED_COLUMNS: dict[str, str] = {
"batch_next_offset": "INTEGER DEFAULT 0",
}
# --- No-op for history/accounts in 3.3.0 ---
def check_history(self, conn: sqlite3.Connection) -> bool:
return True
def update_history(self, conn: sqlite3.Connection) -> None:
pass
def check_accounts(self, conn: sqlite3.Connection) -> bool:
return True
def update_accounts(self, conn: sqlite3.Connection) -> None:
pass
# --- Watch: playlists ---
def check_watch_playlists(self, conn: sqlite3.Connection) -> bool:
try:
cur = conn.execute("PRAGMA table_info(watched_playlists)")
cols = {row[1] for row in cur.fetchall()}
return set(self.PLAYLISTS_ADDED_COLUMNS.keys()).issubset(cols)
except sqlite3.OperationalError:
# Table missing means not ready
return False
def update_watch_playlists(self, conn: sqlite3.Connection) -> None:
# Add new columns if missing
try:
cur = conn.execute("PRAGMA table_info(watched_playlists)")
existing = {row[1] for row in cur.fetchall()}
for col_name, col_type in self.PLAYLISTS_ADDED_COLUMNS.items():
if col_name in existing:
continue
try:
conn.execute(
f"ALTER TABLE watched_playlists ADD COLUMN {col_name} {col_type}"
)
logger.info(
f"Added column '{col_name} {col_type}' to watched_playlists for 3.3.0 batch progress."
)
except sqlite3.OperationalError as e:
logger.warning(
f"Could not add column '{col_name}' to watched_playlists: {e}"
)
except Exception:
logger.error("Failed to update watched_playlists for 3.3.0", exc_info=True)
# --- Watch: artists ---
def check_watch_artists(self, conn: sqlite3.Connection) -> bool:
try:
cur = conn.execute("PRAGMA table_info(watched_artists)")
cols = {row[1] for row in cur.fetchall()}
return set(self.ARTISTS_ADDED_COLUMNS.keys()).issubset(cols)
except sqlite3.OperationalError:
return False
def update_watch_artists(self, conn: sqlite3.Connection) -> None:
try:
cur = conn.execute("PRAGMA table_info(watched_artists)")
existing = {row[1] for row in cur.fetchall()}
for col_name, col_type in self.ARTISTS_ADDED_COLUMNS.items():
if col_name in existing:
continue
try:
conn.execute(
f"ALTER TABLE watched_artists ADD COLUMN {col_name} {col_type}"
)
logger.info(
f"Added column '{col_name} {col_type}' to watched_artists for 3.3.0 batch progress."
)
except sqlite3.OperationalError as e:
logger.warning(
f"Could not add column '{col_name}' to watched_artists: {e}"
)
except Exception:
logger.error("Failed to update watched_artists for 3.3.0", exc_info=True)

View File

@@ -1,41 +0,0 @@
import logging
import sqlite3
logger = logging.getLogger(__name__)
class MigrationV3_2_1:
"""
No-op migration for version 3.2.1 (upgrade path 3.2.1 -> 3.3.0).
No database schema changes are required.
"""
def check_history(self, conn: sqlite3.Connection) -> bool:
return True
def update_history(self, conn: sqlite3.Connection) -> None:
pass
def check_accounts(self, conn: sqlite3.Connection) -> bool:
return True
def update_accounts(self, conn: sqlite3.Connection) -> None:
pass
def check_watch_playlists(self, conn: sqlite3.Connection) -> bool:
return True
def update_watch_playlists(self, conn: sqlite3.Connection) -> None:
pass
def check_watch_artists(self, conn: sqlite3.Connection) -> bool:
return True
def update_watch_artists(self, conn: sqlite3.Connection) -> None:
pass
def log_noop_migration_detected() -> None:
logger.info(
"No migration performed: detected schema for 3.2.1; no changes needed for 3.2.1 -> 3.3.0."
)

View File

@@ -0,0 +1,69 @@
import json
import logging
from pathlib import Path
from typing import Optional
logger = logging.getLogger(__name__)
CONFIG_PATH = Path("./data/config/main.json")
REQUIRED_VERSION = "3.3.0"
TARGET_VERSION = "3.3.1"
def _load_config(config_path: Path) -> Optional[dict]:
try:
if not config_path.exists():
logger.error(f"Configuration file not found at {config_path}")
return None
content = config_path.read_text(encoding="utf-8")
return json.loads(content)
except Exception:
logger.error("Failed to read configuration file for migration", exc_info=True)
return None
def _save_config(config_path: Path, cfg: dict) -> None:
config_path.parent.mkdir(parents=True, exist_ok=True)
config_path.write_text(json.dumps(cfg, indent=4) + "\n", encoding="utf-8")
class MigrationV3_3_0:
"""
3.3.0 migration gate. This migration verifies the configuration indicates
version 3.3.0, then bumps it to 3.3.1.
If the `version` key is missing or not equal to 3.3.0, execution aborts and
prompts the user to update their instance to 3.3.0.
"""
@staticmethod
def assert_config_version_is_3_3_0() -> None:
cfg = _load_config(CONFIG_PATH)
if not cfg or "version" not in cfg:
raise RuntimeError(
"Missing 'version' in data/config/main.json. Please update your configuration to 3.3.0."
)
version = str(cfg.get("version", "")).strip()
# Case 1: exactly 3.3.0 -> bump to 3.3.1
if version == REQUIRED_VERSION:
cfg["version"] = TARGET_VERSION
try:
_save_config(CONFIG_PATH, cfg)
logger.info(
f"Configuration version bumped from {REQUIRED_VERSION} to {TARGET_VERSION}."
)
except Exception:
logger.error(
"Failed to bump configuration version to 3.3.1", exc_info=True
)
raise
return
# Case 2: already 3.3.1 -> OK
if version == TARGET_VERSION:
logger.info("Configuration version 3.3.1 detected. Proceeding.")
return
# Case 3: anything else -> abort and instruct to update to 3.3.0 first
raise RuntimeError(
f"Unsupported configuration version '{version}'. Please update to {REQUIRED_VERSION}."
)
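
A minimal caller of the new gate, mirroring the import added to the migrations runner earlier in this commit (the try/except wrapper is illustrative; only the class and method names come from the file above):

from .v3_3_0 import MigrationV3_3_0

try:
    MigrationV3_3_0.assert_config_version_is_3_3_0()
except RuntimeError as e:
    # e.g. config still at 3.2.x, or the 'version' key is missing entirely
    print(f"Startup blocked: {e}")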

View File

@@ -4,7 +4,7 @@ import logging
import time
import json
import asyncio
-from typing import Set
+from typing import Set, Optional
import redis
import threading
@@ -118,26 +118,22 @@ def start_sse_redis_subscriber():
# Handle different event types
if event_type == "progress_update":
-   # Transform callback data into task format expected by frontend
-   loop = asyncio.new_event_loop()
-   asyncio.set_event_loop(loop)
-   try:
-       broadcast_data = loop.run_until_complete(
-           transform_callback_to_task_format(
-               task_id, event_data
-           )
-       )
-       if broadcast_data:
+   # Transform callback data into standardized update format expected by frontend
+   standardized = standardize_incoming_event(event_data)
+   if standardized:
+       loop = asyncio.new_event_loop()
+       asyncio.set_event_loop(loop)
+       try:
            loop.run_until_complete(
-               sse_broadcaster.broadcast_event(broadcast_data)
+               sse_broadcaster.broadcast_event(standardized)
            )
            logger.debug(
-               f"SSE Redis Subscriber: Broadcasted callback to {len(sse_broadcaster.clients)} clients"
+               f"SSE Redis Subscriber: Broadcasted standardized progress update to {len(sse_broadcaster.clients)} clients"
            )
        finally:
            loop.close()
elif event_type == "summary_update":
-   # Task summary update - use existing trigger_sse_update logic
+   # Task summary update - use standardized trigger
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
@@ -152,18 +148,20 @@ def start_sse_redis_subscriber():
    finally:
        loop.close()
else:
-   # Unknown event type - broadcast as-is
-   loop = asyncio.new_event_loop()
-   asyncio.set_event_loop(loop)
-   try:
-       loop.run_until_complete(
-           sse_broadcaster.broadcast_event(event_data)
-       )
-       logger.debug(
-           f"SSE Redis Subscriber: Broadcasted {event_type} to {len(sse_broadcaster.clients)} clients"
-       )
-   finally:
-       loop.close()
+   # Unknown event type - attempt to standardize and broadcast
+   standardized = standardize_incoming_event(event_data)
+   if standardized:
+       loop = asyncio.new_event_loop()
+       asyncio.set_event_loop(loop)
+       try:
+           loop.run_until_complete(
+               sse_broadcaster.broadcast_event(standardized)
+           )
+           logger.debug(
+               f"SSE Redis Subscriber: Broadcasted standardized {event_type} to {len(sse_broadcaster.clients)} clients"
+           )
+       finally:
+           loop.close()
except Exception as e:
    logger.error(
@@ -180,6 +178,85 @@ def start_sse_redis_subscriber():
logger.debug("SSE Redis Subscriber: Background thread started")
def build_task_object_from_callback(
task_id: str, callback_data: dict
) -> Optional[dict]:
"""Build a standardized task object from callback payload and task info."""
try:
task_info = get_task_info(task_id)
if not task_info:
return None
return {
"task_id": task_id,
"original_url": f"http://localhost:7171/api/{task_info.get('download_type', 'track')}/download/{task_info.get('url', '').split('/')[-1] if task_info.get('url') else ''}",
"last_line": callback_data,
"timestamp": time.time(),
"download_type": task_info.get("download_type", "track"),
"type": task_info.get("type", task_info.get("download_type", "track")),
"name": task_info.get("name", "Unknown"),
"artist": task_info.get("artist", ""),
"created_at": task_info.get("created_at"),
}
except Exception as e:
logger.error(
f"Error building task object from callback for {task_id}: {e}",
exc_info=True,
)
return None
def standardize_incoming_event(event_data: dict) -> Optional[dict]:
"""
Convert various incoming event shapes into a standardized SSE payload:
{
'change_type': 'update' | 'heartbeat',
'tasks': [...],
'current_timestamp': float,
'trigger_reason': str (optional)
}
"""
try:
# Heartbeat passthrough (ensure tasks array exists)
if event_data.get("change_type") == "heartbeat":
return {
"change_type": "heartbeat",
"tasks": [],
"current_timestamp": time.time(),
}
# If already has tasks, just coerce change_type
if isinstance(event_data.get("tasks"), list):
return {
"change_type": event_data.get("change_type", "update"),
"tasks": event_data["tasks"],
"current_timestamp": time.time(),
"trigger_reason": event_data.get("trigger_reason"),
}
# If it's a callback-shaped event
callback_data = event_data.get("callback_data")
task_id = event_data.get("task_id")
if callback_data and task_id:
task_obj = build_task_object_from_callback(task_id, callback_data)
if task_obj:
return {
"change_type": "update",
"tasks": [task_obj],
"current_timestamp": time.time(),
"trigger_reason": event_data.get("event_type", "callback_update"),
}
# Fallback to empty update
return {
"change_type": "update",
"tasks": [],
"current_timestamp": time.time(),
}
except Exception as e:
logger.error(f"Failed to standardize incoming event: {e}", exc_info=True)
return None
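
A few illustrative inputs and outputs for standardize_incoming_event (timestamps and task payloads are made up; the shapes follow the docstring and branches above):

# Heartbeat passthrough (tasks array is guaranteed)
standardize_incoming_event({"change_type": "heartbeat"})
# -> {"change_type": "heartbeat", "tasks": [], "current_timestamp": 1724467200.0}

# Already task-shaped: change_type is coerced, tasks pass through
standardize_incoming_event({"tasks": [{"task_id": "abc"}]})
# -> {"change_type": "update", "tasks": [{"task_id": "abc"}], "current_timestamp": ..., "trigger_reason": None}

# Callback-shaped: a task object is built via build_task_object_from_callback
standardize_incoming_event({"task_id": "abc", "callback_data": {"status": "downloading"}, "event_type": "progress_update"})
# -> {"change_type": "update", "tasks": [<built task object>], "current_timestamp": ..., "trigger_reason": "progress_update"}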
async def transform_callback_to_task_format(task_id: str, event_data: dict) -> dict:
    """Transform callback event data into the task format expected by frontend"""
    try:
@@ -210,7 +287,7 @@ async def transform_callback_to_task_format(task_id: str, event_data: dict) -> dict:
        # Build minimal event data - global counts will be added at broadcast time
        return {
-           "change_type": "update",  # Use "update" so it gets processed by existing frontend logic
+           "change_type": "update",
            "tasks": [task_object],  # Frontend expects tasks array
            "current_timestamp": time.time(),
            "updated_count": 1,
@@ -253,12 +330,12 @@ async def trigger_sse_update(task_id: str, reason: str = "task_update"):
            task_info, last_status, task_id, current_time, dummy_request
        )
-       # Create minimal event data - global counts will be added at broadcast time
+       # Create standardized event data - global counts will be added at broadcast time
        event_data = {
            "tasks": [task_response],
            "current_timestamp": current_time,
            "since_timestamp": current_time,
-           "change_type": "realtime",
+           "change_type": "update",
            "trigger_reason": reason,
        }
@@ -419,6 +496,14 @@ def add_global_task_counts_to_event(event_data):
event_data["active_tasks"] = global_task_counts["active"] event_data["active_tasks"] = global_task_counts["active"]
event_data["all_tasks_count"] = sum(global_task_counts.values()) event_data["all_tasks_count"] = sum(global_task_counts.values())
# Ensure tasks array is present for schema consistency
if "tasks" not in event_data:
event_data["tasks"] = []
# Ensure change_type is present
if "change_type" not in event_data:
event_data["change_type"] = "update"
return event_data return event_data
except Exception as e: except Exception as e:
@@ -495,7 +580,11 @@ def _build_task_response(
    try:
        item_id = item_url.split("/")[-1]
        if item_id:
-           base_url = str(request.base_url).rstrip("/") if request else "http://localhost:7171"
+           base_url = (
+               str(request.base_url).rstrip("/")
+               if request
+               else "http://localhost:7171"
+           )
            dynamic_original_url = (
                f"{base_url}/api/{download_type}/download/{item_id}"
            )
@@ -573,7 +662,9 @@ def _build_task_response(
    return task_response
-async def get_paginated_tasks(page=1, limit=20, active_only=False, request: Optional[Request] = None):
+async def get_paginated_tasks(
+    page=1, limit=20, active_only=False, request: Optional[Request] = None
+):
    """
    Get paginated list of tasks.
    """
@@ -1066,47 +1157,18 @@ async def stream_task_updates(
    try:
        # Register this client with the broadcaster
-       logger.debug(f"SSE Stream: New client connecting...")
+       logger.debug("SSE Stream: New client connecting...")
        await sse_broadcaster.add_client(client_queue)
-       logger.debug(f"SSE Stream: Client registered successfully, total clients: {len(sse_broadcaster.clients)}")
+       logger.debug(
+           f"SSE Stream: Client registered successfully, total clients: {len(sse_broadcaster.clients)}"
+       )
-       # Send initial data immediately upon connection
+       # Send initial data immediately upon connection (standardized 'update')
        initial_data = await generate_task_update_event(
            time.time(), active_only, request
        )
        yield initial_data
# Also send any active tasks as callback-style events to newly connected clients
all_tasks = get_all_tasks()
for task_summary in all_tasks:
task_id = task_summary.get("task_id")
if not task_id:
continue
task_info = get_task_info(task_id)
if not task_info:
continue
last_status = get_last_task_status(task_id)
task_status = get_task_status_from_last_status(last_status)
# Send recent callback data for active or recently completed tasks
if is_task_active(task_status) or (
last_status and last_status.get("timestamp", 0) > time.time() - 30
):
if last_status and "raw_callback" in last_status:
callback_event = {
"task_id": task_id,
"callback_data": last_status["raw_callback"],
"timestamp": last_status.get("timestamp", time.time()),
"change_type": "callback",
"event_type": "progress_update",
"replay": True, # Mark as replay for client
}
event_json = json.dumps(callback_event)
yield f"data: {event_json}\n\n"
logger.debug(f"SSE Stream: Sent replay callback for task {task_id}")
        # Send periodic heartbeats and listen for real-time events
        last_heartbeat = time.time()
        heartbeat_interval = 30.0
@@ -1173,6 +1235,7 @@ async def stream_task_updates(
                    + task_counts["retrying"],
                    "task_counts": task_counts,
                    "change_type": "heartbeat",
+                   "tasks": [],
                }
event_json = json.dumps(heartbeat_data) event_json = json.dumps(heartbeat_data)
@@ -1187,6 +1250,7 @@ async def stream_task_updates(
"error": "Internal server error", "error": "Internal server error",
"timestamp": time.time(), "timestamp": time.time(),
"change_type": "error", "change_type": "error",
"tasks": [],
} }
) )
yield f"data: {error_data}\n\n" yield f"data: {error_data}\n\n"
@@ -1289,6 +1353,7 @@ async def generate_task_update_event(
"current_timestamp": current_time, "current_timestamp": current_time,
"updated_count": len(updated_tasks), "updated_count": len(updated_tasks),
"since_timestamp": since_timestamp, "since_timestamp": since_timestamp,
"change_type": "update",
"initial": True, # Mark as initial load "initial": True, # Mark as initial load
} }
@@ -1301,7 +1366,12 @@ async def generate_task_update_event(
    except Exception as e:
        logger.error(f"Error generating initial SSE event: {e}", exc_info=True)
        error_data = json.dumps(
-           {"error": "Failed to load initial data", "timestamp": time.time()}
+           {
+               "error": "Failed to load initial data",
+               "timestamp": time.time(),
+               "tasks": [],
+               "change_type": "error",
+           }
        )
        return f"data: {error_data}\n\n"

View File

@@ -101,7 +101,7 @@ def download_album(
)
dl.download_albumspo(
    link_album=url,  # Spotify URL
-   output_dir="/app/downloads",
+   output_dir="./downloads",
    quality_download=quality,  # Deezer quality
    recursive_quality=recursive_quality,
    recursive_download=False,
@@ -159,7 +159,7 @@ def download_album(
)
spo.download_album(
    link_album=url,  # Spotify URL
-   output_dir="/app/downloads",
+   output_dir="./downloads",
    quality_download=fall_quality,  # Spotify quality
    recursive_quality=recursive_quality,
    recursive_download=False,
@@ -216,7 +216,7 @@ def download_album(
)
spo.download_album(
    link_album=url,
-   output_dir="/app/downloads",
+   output_dir="./downloads",
    quality_download=quality,
    recursive_quality=recursive_quality,
    recursive_download=False,
@@ -260,7 +260,7 @@ def download_album(
)
dl.download_albumdee(  # Deezer URL, download via Deezer
    link_album=url,
-   output_dir="/app/downloads",
+   output_dir="./downloads",
    quality_download=quality,
    recursive_quality=recursive_quality,
    recursive_download=False,
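
The same output_dir change ("/app/downloads" to "./downloads") repeats in the playlist and track modules below. The practical effect is that downloads now resolve against the process working directory rather than a container-absolute path; a quick way to check what the relative form resolves to (illustrative):

from pathlib import Path

print(Path("./downloads").resolve())  # e.g. /srv/spotizerr/downloads when run from /srv/spotizerr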

View File

@@ -28,7 +28,7 @@ CONFIG_FILE_PATH = Path("./data/config/main.json")
DEFAULT_MAIN_CONFIG = {
    "service": "spotify",
-   "version": "3.3.0",
+   "version": "3.3.1",
    "spotify": "",
    "deezer": "",
    "fallback": False,

View File

@@ -98,7 +98,7 @@ def download_playlist(
)
dl.download_playlistspo(
    link_playlist=url,  # Spotify URL
-   output_dir="/app/downloads",
+   output_dir="./downloads",
    quality_download=quality,  # Deezer quality
    recursive_quality=recursive_quality,
    recursive_download=False,
@@ -161,7 +161,7 @@ def download_playlist(
)
spo.download_playlist(
    link_playlist=url,  # Spotify URL
-   output_dir="/app/downloads",
+   output_dir="./downloads",
    quality_download=fall_quality,  # Spotify quality
    recursive_quality=recursive_quality,
    recursive_download=False,
@@ -224,7 +224,7 @@ def download_playlist(
)
spo.download_playlist(
    link_playlist=url,
-   output_dir="/app/downloads",
+   output_dir="./downloads",
    quality_download=quality,
    recursive_quality=recursive_quality,
    recursive_download=False,
@@ -268,7 +268,7 @@ def download_playlist(
)
dl.download_playlistdee(  # Deezer URL, download via Deezer
    link_playlist=url,
-   output_dir="/app/downloads",
+   output_dir="./downloads",
    quality_download=quality,
    recursive_quality=recursive_quality,  # Usually False for playlists to get individual track qualities
    recursive_download=False,

View File

@@ -94,7 +94,7 @@ def download_track(
# download_trackspo means: Spotify URL, download via Deezer
dl.download_trackspo(
    link_track=url,  # Spotify URL
-   output_dir="/app/downloads",
+   output_dir="./downloads",
    quality_download=quality,  # Deezer quality
    recursive_quality=recursive_quality,
    recursive_download=False,
@@ -153,7 +153,7 @@ def download_track(
)
spo.download_track(
    link_track=url,  # Spotify URL
-   output_dir="/app/downloads",
+   output_dir="./downloads",
    quality_download=fall_quality,  # Spotify quality
    recursive_quality=recursive_quality,
    recursive_download=False,
@@ -211,7 +211,7 @@ def download_track(
)
spo.download_track(
    link_track=url,
-   output_dir="/app/downloads",
+   output_dir="./downloads",
    quality_download=quality,
    recursive_quality=recursive_quality,
    recursive_download=False,
@@ -254,7 +254,7 @@ def download_track(
)
dl.download_trackdee(  # Deezer URL, download via Deezer
    link_track=url,
-   output_dir="/app/downloads",
+   output_dir="./downloads",
    quality_download=quality,
    recursive_quality=recursive_quality,
    recursive_download=False,

View File

@@ -1098,7 +1098,7 @@ def update_playlist_m3u_file(playlist_spotify_id: str):
    # Get configuration settings
    output_dir = (
-       "/app/downloads"  # This matches the output_dir used in download functions
+       "./downloads"  # This matches the output_dir used in download functions
    )
    # Get all tracks for the playlist
@@ -1125,14 +1125,14 @@ def update_playlist_m3u_file(playlist_spotify_id: str):
    skipped_missing_final_path = 0
    for track in tracks:
-       # Use final_path from deezspot summary and convert from /app/downloads to ../ relative path
+       # Use final_path from deezspot summary and convert from ./downloads to ../ relative path
        final_path = track.get("final_path")
        if not final_path:
            skipped_missing_final_path += 1
            continue
        normalized = str(final_path).replace("\\", "/")
-       if normalized.startswith("/app/downloads/"):
-           relative_path = normalized.replace("/app/downloads/", "../", 1)
+       if normalized.startswith("./downloads/"):
+           relative_path = normalized.replace("./downloads/", "../", 1)
        elif "/downloads/" in normalized.lower():
            idx = normalized.lower().rfind("/downloads/")
            relative_path = "../" + normalized[idx + len("/downloads/") :]

View File

@@ -1,7 +1,7 @@
{
    "name": "spotizerr-ui",
    "private": true,
-   "version": "3.3.0",
+   "version": "3.3.1",
    "type": "module",
    "scripts": {
        "dev": "vite",

View File

@@ -772,7 +772,7 @@ export const Queue = () => {
  const priorities = {
    "real-time": 1, downloading: 2, processing: 3, initializing: 4,
    retrying: 5, queued: 6, done: 7, completed: 7, error: 8, cancelled: 9, skipped: 10
- };
+ } as Record<string, number>;
  return priorities[status as keyof typeof priorities] || 10;
};

View File

@@ -103,6 +103,7 @@ export function AccountsTab() {
    },
    onError: (error) => {
      const msg = extractApiErrorMessage(error);
+     toast.error(msg);
    },
  });

View File

@@ -10,7 +10,7 @@ import {
} from "./queue-context"; } from "./queue-context";
import { toast } from "sonner"; import { toast } from "sonner";
import { v4 as uuidv4 } from "uuid"; import { v4 as uuidv4 } from "uuid";
import type { CallbackObject } from "@/types/callbacks"; import type { CallbackObject, SummaryObject, IDs } from "@/types/callbacks";
import { useAuth } from "@/contexts/auth-context"; import { useAuth } from "@/contexts/auth-context";
export function QueueProvider({ children }: { children: ReactNode }) { export function QueueProvider({ children }: { children: ReactNode }) {
@@ -43,51 +43,86 @@ export function QueueProvider({ children }: { children: ReactNode }) {
    return items.filter(item => isActiveStatus(getStatus(item))).length;
  }, [items]);
- // Improved deduplication - check both id and taskId fields
- const itemExists = useCallback((taskId: string, items: QueueItem[]): boolean => {
-   return items.some(item =>
-     item.id === taskId ||
-     item.taskId === taskId ||
-     // Also check spotify ID to prevent same track being added multiple times
-     (item.spotifyId && item.spotifyId === taskId)
-   );
- }, []);
+ const extractIDs = useCallback((cb?: CallbackObject): IDs | undefined => {
+   if (!cb) return undefined;
+   if ((cb as any).track) return (cb as any).track.ids as IDs;
+   if ((cb as any).album) return (cb as any).album.ids as IDs;
+   if ((cb as any).playlist) return (cb as any).playlist.ids as IDs;
+   return undefined;
+ }, []);
  // Convert SSE task data to QueueItem
- const createQueueItemFromTask = useCallback((task: any): QueueItem => {
-   const spotifyId = task.original_url?.split("/").pop() || "";
-   // Extract display info from callback
-   let name = task.name || "Unknown";
-   let artist = task.artist || "";
-   // Handle different callback structures
-   if (task.last_line) {
-     try {
-       if ("track" in task.last_line) {
-         name = task.last_line.track.title || name;
-         artist = task.last_line.track.artists?.[0]?.name || artist;
-       } else if ("album" in task.last_line) {
-         name = task.last_line.album.title || name;
-         artist = task.last_line.album.artists?.map((a: any) => a.name).join(", ") || artist;
-       } else if ("playlist" in task.last_line) {
-         name = task.last_line.playlist.title || name;
-         artist = task.last_line.playlist.owner?.name || artist;
-       }
-     } catch (error) {
-       console.warn(`createQueueItemFromTask: Error parsing callback for task ${task.task_id}:`, error);
-     }
-   }
+ const createQueueItemFromTask = useCallback((task: any): QueueItem => {
+   const lastCallback = task.last_line as CallbackObject | undefined;
+   const ids = extractIDs(lastCallback);
+   // Determine container type up-front
+   const downloadType = (task.download_type || task.type || "track") as DownloadType;
+   // Compute spotifyId fallback chain
+   const fallbackFromUrl = task.original_url?.split("/").pop() || "";
+   const spotifyId = ids?.spotify || fallbackFromUrl || "";
+   // Extract display info from callback
+   let name: string = task.name || "Unknown";
+   let artist: string = task.artist || "";
+   try {
+     if (lastCallback) {
+       if ((lastCallback as any).track) {
+         // Prefer parent container title if this is an album/playlist operation
+         const parent = (lastCallback as any).parent;
+         if (downloadType === "playlist" && parent && (parent as any).title) {
+           name = (parent as any).title || name;
+           artist = (parent as any).owner?.name || artist;
+         } else if (downloadType === "album" && parent && (parent as any).title) {
+           name = (parent as any).title || name;
+           const arts = (parent as any).artists || [];
+           artist = Array.isArray(arts) && arts.length > 0 ? (arts.map((a: any) => a.name).filter(Boolean).join(", ")) : artist;
+         } else {
+           // Fallback to the current track's info for standalone track downloads
+           name = (lastCallback as any).track.title || name;
+           const arts = (lastCallback as any).track.artists || [];
+           artist = Array.isArray(arts) && arts.length > 0 ? (arts.map((a: any) => a.name).filter(Boolean).join(", ")) : artist;
+         }
+       } else if ((lastCallback as any).album) {
+         name = (lastCallback as any).album.title || name;
+         const arts = (lastCallback as any).album.artists || [];
+         artist = Array.isArray(arts) && arts.length > 0 ? (arts.map((a: any) => a.name).filter(Boolean).join(", ")) : artist;
+       } else if ((lastCallback as any).playlist) {
+         name = (lastCallback as any).playlist.title || name;
+         artist = (lastCallback as any).playlist.owner?.name || artist;
+       } else if ((lastCallback as any).status === "processing") {
+         name = (lastCallback as any).name || name;
+         artist = (lastCallback as any).artist || artist;
+       }
+     }
+   } catch (error) {
+     console.warn(`createQueueItemFromTask: Error parsing callback for task ${task.task_id}:`, error);
+   }
+   // Prefer summary from callback status_info if present; fallback to task.summary
+   let summary: SummaryObject | undefined = undefined;
+   try {
+     const statusInfo = (lastCallback as any)?.status_info;
+     if (statusInfo && typeof statusInfo === "object" && "summary" in statusInfo) {
+       summary = (statusInfo as any).summary || undefined;
+     }
+   } catch {}
+   if (!summary && task.summary) {
+     summary = task.summary as SummaryObject;
+   }
    const queueItem: QueueItem = {
      id: task.task_id,
      taskId: task.task_id,
-     downloadType: task.download_type || task.type || "track",
+     downloadType,
      spotifyId,
+     ids,
-     lastCallback: task.last_line as CallbackObject,
+     lastCallback: lastCallback as CallbackObject,
      name,
      artist,
-     summary: task.summary,
+     summary,
      error: task.error,
    };
@@ -98,7 +133,7 @@ export function QueueProvider({ children }: { children: ReactNode }) {
    }
    return queueItem;
- }, []);
+ }, [extractIDs]);
  // Schedule auto-removal for completed tasks
  const scheduleRemoval = useCallback((taskId: string, delay: number = 10000) => {
@@ -162,7 +197,7 @@ export function QueueProvider({ children }: { children: ReactNode }) {
      sseConnection.current = eventSource;
      eventSource.onopen = () => {
        console.log("SSE connected successfully");
        reconnectAttempts.current = 0;
        lastHeartbeat.current = Date.now();
@@ -172,44 +207,44 @@ export function QueueProvider({ children }: { children: ReactNode }) {
          clearTimeout(reconnectTimeoutRef.current);
          reconnectTimeoutRef.current = null;
        }
      };
      eventSource.onmessage = (event) => {
        try {
          const data = JSON.parse(event.data);
          // Debug logging for all SSE events
          console.log("🔄 SSE Event Received:", {
            timestamp: new Date().toISOString(),
            changeType: data.change_type || "update",
            totalTasks: data.total_tasks,
            taskCounts: data.task_counts,
            tasksCount: data.tasks?.length || 0,
            taskIds: data.tasks?.map((t: any) => {
              const tempItem = createQueueItemFromTask(t);
              const status = getStatus(tempItem);
              // Special logging for playlist/album track progress
              if (t.last_line?.current_track && t.last_line?.total_tracks) {
                return {
                  id: t.task_id,
                  status,
                  type: t.download_type,
                  track: `${t.last_line.current_track}/${t.last_line.total_tracks}`,
                  trackStatus: t.last_line.status_info?.status
                };
              }
              return { id: t.task_id, status, type: t.download_type };
            }) || [],
            rawData: data
          });
          if (data.error) {
            console.error("SSE error:", data.error);
            toast.error("Connection error");
            return;
          }
-         // Handle different message types from optimized backend
+         // Handle message types from backend
          const changeType = data.change_type || "update";
          const triggerReason = data.trigger_reason || "";
@@ -221,7 +256,6 @@ export function QueueProvider({ children }: { children: ReactNode }) {
            (total_tasks || 0);
            setTotalTasks(calculatedTotal);
            lastHeartbeat.current = Date.now();
-           // Reduce heartbeat logging noise - only log every 10th heartbeat
            if (Math.random() < 0.1) {
              console.log("SSE: Connection active (heartbeat)");
            }
@@ -249,9 +283,10 @@ export function QueueProvider({ children }: { children: ReactNode }) {
            setItems(prev => {
              // Create improved deduplication maps
-             const existingTaskIds = new Set();
-             const existingSpotifyIds = new Set();
-             const existingItemsMap = new Map();
+             const existingTaskIds = new Set<string>();
+             const existingSpotifyIds = new Set<string>();
+             const existingDeezerIds = new Set<string>();
+             const existingItemsMap = new Map<string, QueueItem>();
              prev.forEach(item => {
                if (item.id) {
@@ -263,6 +298,7 @@ export function QueueProvider({ children }: { children: ReactNode }) {
                  existingItemsMap.set(item.taskId, item);
                }
                if (item.spotifyId) existingSpotifyIds.add(item.spotifyId);
+               if (item.ids?.deezer) existingDeezerIds.add(item.ids.deezer);
              });
              // Process each updated task
@@ -271,33 +307,37 @@ export function QueueProvider({ children }: { children: ReactNode }) {
              const newTasksToAdd: QueueItem[] = [];
              for (const task of updatedTasks) {
-               const taskId = task.task_id;
-               const spotifyId = task.original_url?.split("/").pop();
+               const taskId = task.task_id as string;
                // Skip if already processed (shouldn't happen but safety check)
                if (processedTaskIds.has(taskId)) continue;
                processedTaskIds.add(taskId);
                // Check if this task exists in current queue
-               const existingItem = existingItemsMap.get(taskId) ||
-                 Array.from(existingItemsMap.values()).find(item =>
-                   item.spotifyId === spotifyId
-                 );
-               if (existingItem) {
+               const existingItem = existingItemsMap.get(taskId);
+               const newItemCandidate = createQueueItemFromTask(task);
+               const candidateSpotify = newItemCandidate.spotifyId;
+               const candidateDeezer = newItemCandidate.ids?.deezer;
+               // If not found by id, try to match by identifiers
+               const existingById = existingItem || Array.from(existingItemsMap.values()).find(item =>
+                 (candidateSpotify && item.spotifyId === candidateSpotify) ||
+                 (candidateDeezer && item.ids?.deezer === candidateDeezer)
+               );
+               if (existingById) {
                  // Skip SSE updates for items that are already cancelled by user action
-                 const existingStatus = getStatus(existingItem);
-                 if (existingStatus === "cancelled" && existingItem.error === "Cancelled by user") {
+                 const existingStatus = getStatus(existingById);
+                 if (existingStatus === "cancelled" && existingById.error === "Cancelled by user") {
                    console.log(`SSE: Skipping update for user-cancelled task ${taskId}`);
                    continue;
                  }
                  // Update existing item
-                 const updatedItem = createQueueItemFromTask(task);
+                 const updatedItem = newItemCandidate;
                  const status = getStatus(updatedItem);
-                 const previousStatus = getStatus(existingItem);
-                 // Only log significant status changes
+                 const previousStatus = getStatus(existingById);
                  if (previousStatus !== status) {
                    console.log(`SSE: Status change ${taskId}: ${previousStatus} → ${status}`);
                  }
@@ -305,33 +345,32 @@ export function QueueProvider({ children }: { children: ReactNode }) {
                  // Schedule removal for terminal states
                  if (isTerminalStatus(status)) {
                    const delay = status === "cancelled" ? 5000 : 10000;
-                   scheduleRemoval(existingItem.id, delay);
+                   scheduleRemoval(existingById.id, delay);
                    console.log(`SSE: Scheduling removal for terminal task ${taskId} (${status}) in ${delay}ms`);
                  }
                  updatedItems.push(updatedItem);
                } else {
                  // This is a new task from SSE
-                 const newItem = createQueueItemFromTask(task);
+                 const newItem = newItemCandidate;
                  const status = getStatus(newItem);
-                 // Check for duplicates by spotify ID
-                 if (spotifyId && existingSpotifyIds.has(spotifyId)) {
-                   console.log(`SSE: Skipping duplicate by spotify ID: ${spotifyId}`);
+                 // Check for duplicates by identifiers
+                 if ((candidateSpotify && existingSpotifyIds.has(candidateSpotify)) ||
+                     (candidateDeezer && existingDeezerIds.has(candidateDeezer))) {
+                   console.log(`SSE: Skipping duplicate by identifier: ${candidateSpotify || candidateDeezer}`);
                    continue;
                  }
-                 // Check if this is a pending download
-                 if (pendingDownloads.current.has(spotifyId || taskId)) {
+                 // Check if this is a pending download (by spotify id for now)
+                 if (pendingDownloads.current.has(candidateSpotify || newItem.id)) {
                    console.log(`SSE: Skipping pending download: ${taskId}`);
                    continue;
                  }
-                 // For terminal tasks from SSE, these should be tasks that just transitioned
-                 // (backend now filters out already-terminal tasks)
+                 // For terminal tasks from SSE
                  if (isTerminalStatus(status)) {
                    console.log(`SSE: Adding recently completed task: ${taskId} (${status})`);
-                   // Schedule immediate removal for terminal tasks
                    const delay = status === "cancelled" ? 5000 : 10000;
                    scheduleRemoval(newItem.id, delay);
                  } else if (isActiveStatus(status)) {
@@ -349,7 +388,9 @@ export function QueueProvider({ children }: { children: ReactNode }) {
              const finalItems = prev.map(item => {
                const updated = updatedItems.find(u =>
                  u.id === item.id || u.taskId === item.id ||
-                 u.id === item.taskId || u.taskId === item.taskId
+                 u.id === item.taskId || u.taskId === item.taskId ||
+                 (u.spotifyId && u.spotifyId === item.spotifyId) ||
+                 (u.ids?.deezer && u.ids.deezer === item.ids?.deezer)
                );
                return updated || item;
              });
@@ -360,69 +401,69 @@ export function QueueProvider({ children }: { children: ReactNode }) {
} else if (changeType === "update") { } else if (changeType === "update") {
// Update received but no tasks - might be count updates only // Update received but no tasks - might be count updates only
console.log("SSE: Received update with count changes only"); console.log("SSE: Received update with count changes only");
}
} catch (error) {
console.error("Failed to parse SSE message:", error, event.data);
} }
}; } catch (error) {
console.error("Failed to parse SSE message:", error, event.data);
}
};
eventSource.onerror = (error) => { eventSource.onerror = (error) => {
// Use appropriate logging level - first attempt failures are common and expected // Use appropriate logging level - first attempt failures are common and expected
if (reconnectAttempts.current === 0) { if (reconnectAttempts.current === 0) {
console.log("SSE initial connection failed, will retry shortly..."); console.log("SSE initial connection failed, will retry shortly...");
} else { } else {
console.warn("SSE connection error:", error); console.warn("SSE connection error:", error);
}
// Only check for auth errors if auth is enabled
if (authEnabled) {
const token = authApiClient.getToken();
if (!token) {
console.warn("SSE: Connection error and no auth token - stopping reconnection attempts");
eventSource.close();
sseConnection.current = null;
stopHealthCheck();
return;
}
}
eventSource.close();
sseConnection.current = null;
if (reconnectAttempts.current < maxReconnectAttempts) {
reconnectAttempts.current++;
// Use shorter delays for faster recovery, especially on first attempts
const baseDelay = reconnectAttempts.current === 1 ? 100 : 1000;
const delay = Math.min(baseDelay * Math.pow(2, reconnectAttempts.current - 1), 15000);
if (reconnectAttempts.current === 1) {
console.log("SSE: Retrying connection shortly...");
} else {
console.log(`SSE: Reconnecting in ${delay}ms (attempt ${reconnectAttempts.current}/${maxReconnectAttempts})`);
}
reconnectTimeoutRef.current = window.setTimeout(() => {
if (reconnectAttempts.current === 1) {
console.log("SSE: Attempting reconnection...");
} else {
console.log("SSE: Attempting to reconnect...");
}
connectSSE();
}, delay);
} else {
console.error("SSE: Max reconnection attempts reached");
toast.error("Connection lost. Please refresh the page.");
}
};
} catch (error) {
console.log("Initial SSE connection setup failed, will retry:", error);
// Don't show toast for initial connection failures since they often recover quickly
if (reconnectAttempts.current > 0) {
toast.error("Failed to establish connection");
} }
// Only check for auth errors if auth is enabled
if (authEnabled) {
const token = authApiClient.getToken();
if (!token) {
console.warn("SSE: Connection error and no auth token - stopping reconnection attempts");
eventSource.close();
sseConnection.current = null;
stopHealthCheck();
return;
}
}
eventSource.close();
sseConnection.current = null;
if (reconnectAttempts.current < maxReconnectAttempts) {
reconnectAttempts.current++;
// Use shorter delays for faster recovery, especially on first attempts
const baseDelay = reconnectAttempts.current === 1 ? 100 : 1000;
const delay = Math.min(baseDelay * Math.pow(2, reconnectAttempts.current - 1), 15000);
if (reconnectAttempts.current === 1) {
console.log("SSE: Retrying connection shortly...");
} else {
console.log(`SSE: Reconnecting in ${delay}ms (attempt ${reconnectAttempts.current}/${maxReconnectAttempts})`);
}
reconnectTimeoutRef.current = window.setTimeout(() => {
if (reconnectAttempts.current === 1) {
console.log("SSE: Attempting reconnection...");
} else {
console.log("SSE: Attempting to reconnect...");
}
connectSSE();
}, delay);
} else {
console.error("SSE: Max reconnection attempts reached");
toast.error("Connection lost. Please refresh the page.");
}
};
} catch (error) {
console.log("Initial SSE connection setup failed, will retry:", error);
// Don't show toast for initial connection failures since they often recover quickly
if (reconnectAttempts.current > 0) {
toast.error("Failed to establish connection");
} }
}, [createQueueItemFromTask, scheduleRemoval, startHealthCheck, authEnabled]); }
}, [createQueueItemFromTask, startHealthCheck, authEnabled, stopHealthCheck]);
  const disconnectSSE = useCallback(() => {
    if (sseConnection.current) {
@@ -449,17 +490,19 @@ export function QueueProvider({ children }: { children: ReactNode }) {
      if (newTasks.length > 0) {
        setItems(prev => {
-         const uniqueNewTasks = newTasks
-           .filter((task: any) => !itemExists(task.task_id, prev))
-           .filter((task: any) => {
-             const tempItem = createQueueItemFromTask(task);
-             const status = getStatus(tempItem);
-             // Consistent filtering - exclude all terminal state tasks in pagination too
-             return !isTerminalStatus(status);
-           })
-           .map((task: any) => createQueueItemFromTask(task));
+         const extended = newTasks
+           .map((task: any) => createQueueItemFromTask(task))
+           .filter((qi: QueueItem) => {
+             const status = getStatus(qi);
+             // Consistent filtering - exclude all terminal state tasks in pagination too
+             if (isTerminalStatus(status)) return false;
+             // Dedupe by task id or identifiers
+             if (prev.some(p => p.id === qi.id || p.taskId === qi.id)) return false;
+             if (qi.spotifyId && prev.some(p => p.spotifyId === qi.spotifyId)) return false;
+             if (qi.ids?.deezer && prev.some(p => p.ids?.deezer === qi.ids?.deezer)) return false;
+             return true;
+           });

-         return [...prev, ...uniqueNewTasks];
+         return [...prev, ...extended];
        });
        setCurrentPage(nextPage);
      }
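
Read as a standalone predicate, the new pagination filter keeps a mapped QueueItem only when it is non-terminal and no existing item shares its task id, Spotify id, or Deezer id. A hedged sketch using the same helpers and fields as the hunk above, not the literal implementation:

// Dedupe rule from the pagination handler, extracted for illustration.
function isNewQueueItem(qi: QueueItem, prev: QueueItem[]): boolean {
  if (isTerminalStatus(getStatus(qi))) return false; // drop finished/errored tasks
  if (prev.some(p => p.id === qi.id || p.taskId === qi.id)) return false; // same task
  if (qi.spotifyId && prev.some(p => p.spotifyId === qi.spotifyId)) return false;
  if (qi.ids?.deezer && prev.some(p => p.ids?.deezer === qi.ids?.deezer)) return false;
  return true;
}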
@@ -471,7 +514,7 @@ export function QueueProvider({ children }: { children: ReactNode }) {
    } finally {
      setIsLoadingMore(false);
    }
-  }, [hasMore, isLoadingMore, currentPage, createQueueItemFromTask, itemExists]);
+  }, [hasMore, isLoadingMore, currentPage, createQueueItemFromTask]);

  // Note: SSE connection state is managed through the initialize effect and restartSSE method
  // The auth context should call restartSSE() when login/logout occurs
@@ -496,13 +539,11 @@ export function QueueProvider({ children }: { children: ReactNode }) {
    const { tasks, pagination, total_tasks, task_counts } = response.data;

    const queueItems = tasks
-      .filter((task: any) => {
-        const tempItem = createQueueItemFromTask(task);
-        const status = getStatus(tempItem);
-        // On refresh, exclude all terminal state tasks to start with a clean queue
-        return !isTerminalStatus(status);
-      })
-      .map((task: any) => createQueueItemFromTask(task));
+      .map((task: any) => createQueueItemFromTask(task))
+      .filter((qi: QueueItem) => {
+        const status = getStatus(qi);
+        // On refresh, exclude all terminal state tasks to start with a clean queue
+        return !isTerminalStatus(status);
+      });

    console.log(`Queue initialized: ${queueItems.length} items (filtered out terminal state tasks)`);
    setItems(queueItems);
@@ -542,8 +583,8 @@ export function QueueProvider({ children }: { children: ReactNode }) {
      return;
    }

-    // Check if item already exists in queue
-    if (itemExists(item.spotifyId, items)) {
+    // Check if item already exists in queue (by spotify id or identifiers on items)
+    if (items.some(i => i.spotifyId === item.spotifyId || i.ids?.spotify === item.spotifyId)) {
      toast.info("Item already in queue");
      return;
    }
@@ -551,22 +592,22 @@ export function QueueProvider({ children }: { children: ReactNode }) {
    const tempId = uuidv4();
    pendingDownloads.current.add(item.spotifyId);

    const newItem: QueueItem = {
      id: tempId,
      downloadType: item.type,
      spotifyId: item.spotifyId,
      name: item.name,
      artist: item.artist || "",
-    };
+    } as QueueItem;

    setItems(prev => [newItem, ...prev]);

    try {
      const response = await authApiClient.client.get(`/${item.type}/download/${item.spotifyId}`);
      const { task_id: taskId } = response.data;

      setItems(prev =>
        prev.map(i =>
          i.id === tempId ? { ...i, id: taskId, taskId } : i
        )
      );

@@ -575,15 +616,15 @@ export function QueueProvider({ children }: { children: ReactNode }) {
      pendingDownloads.current.delete(item.spotifyId);
      connectSSE(); // Ensure connection is active
    } catch (error: any) {
      console.error(`Failed to start download:`, error);
      toast.error(`Failed to start download for ${item.name}`);
      // Remove failed item and clear from pending
      setItems(prev => prev.filter(i => i.id !== tempId));
      pendingDownloads.current.delete(item.spotifyId);
    }
-  }, [connectSSE, itemExists, items]);
+  }, [connectSSE, items]);
  const removeItem = useCallback((id: string) => {
    const item = items.find(i => i.id === id);

@@ -604,32 +645,18 @@ export function QueueProvider({ children }: { children: ReactNode }) {
  }, [items]);

  const cancelItem = useCallback(async (id: string) => {
    const item = items.find(i => i.id === id);
    if (!item?.taskId) return;

    try {
      await authApiClient.client.post(`/prgs/cancel/${item.taskId}`);
-      setItems(prev =>
-        prev.map(i =>
-          i.id === id ? {
-            ...i,
-            error: "Cancelled by user",
-            lastCallback: {
-              status: "cancelled",
-              timestamp: Date.now() / 1000,
-              type: item.downloadType,
-              name: item.name,
-              artist: item.artist
-            } as unknown as CallbackObject
-          } : i
-        )
-      );
+      // Mark as cancelled via error field to preserve type safety
+      setItems(prev => prev.map(i => i.id === id ? { ...i, error: "Cancelled by user" } : i));

-      // Remove immediately after showing cancelled state briefly
+      // Remove shortly after showing cancelled state
      setTimeout(() => {
        setItems(prev => prev.filter(i => i.id !== id));
-        // Clean up any existing removal timer
        if (removalTimers.current[id]) {
          clearTimeout(removalTimers.current[id]);
          delete removalTimers.current[id];

@@ -637,11 +664,11 @@ export function QueueProvider({ children }: { children: ReactNode }) {
      }, 500);

      toast.info(`Cancelled: ${item.name}`);
    } catch (error) {
      console.error("Failed to cancel task:", error);
      toast.error(`Failed to cancel: ${item.name}`);
    }
-  }, [items, scheduleRemoval]);
+  }, [items]);
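
The cancel path is optimistic: flag the item locally through the error field, let the UI show the cancelled state for roughly half a second, then drop the item and clear any pending removal timer. A condensed sketch of that pattern, assuming the setItems setter and removalTimers ref from the surrounding provider:

// Sketch of the mark-then-remove cancel pattern used above; names come from the provider scope.
function markCancelledThenRemove(id: string) {
  setItems(prev => prev.map(i => (i.id === id ? { ...i, error: "Cancelled by user" } : i)));
  setTimeout(() => {
    setItems(prev => prev.filter(i => i.id !== id));
    if (removalTimers.current[id]) {
      clearTimeout(removalTimers.current[id]);
      delete removalTimers.current[id];
    }
  }, 500); // brief window so the user sees the cancelled state
}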
  const cancelAll = useCallback(async () => {
    const activeItems = items.filter(item => {

@@ -657,26 +684,11 @@ export function QueueProvider({ children }: { children: ReactNode }) {
    try {
      await authApiClient.client.post("/prgs/cancel/all");

+      // Mark each active item as cancelled via error field
      activeItems.forEach(item => {
-        setItems(prev =>
-          prev.map(i =>
-            i.id === item.id ? {
-              ...i,
-              error: "Cancelled by user",
-              lastCallback: {
-                status: "cancelled",
-                timestamp: Date.now() / 1000,
-                type: item.downloadType,
-                name: item.name,
-                artist: item.artist
-              } as unknown as CallbackObject
-            } : i
-          )
-        );
+        setItems(prev => prev.map(i => i.id === item.id ? { ...i, error: "Cancelled by user" } : i));

-        // Remove immediately after showing cancelled state briefly
        setTimeout(() => {
          setItems(prev => prev.filter(i => i.id !== item.id));
-          // Clean up any existing removal timer
          if (removalTimers.current[item.id]) {
            clearTimeout(removalTimers.current[item.id]);
            delete removalTimers.current[item.id];

@@ -689,7 +701,7 @@ export function QueueProvider({ children }: { children: ReactNode }) {
      console.error("Failed to cancel all:", error);
      toast.error("Failed to cancel downloads");
    }
-  }, [items, scheduleRemoval]);
+  }, [items]);
  const clearCompleted = useCallback(() => {
    setItems(prev => prev.filter(item => {

View File

@@ -1,23 +1,23 @@
import { createContext, useContext } from "react";
-import type { SummaryObject, CallbackObject, TrackCallbackObject, AlbumCallbackObject, PlaylistCallbackObject, ProcessingCallbackObject } from "@/types/callbacks";
+import type { SummaryObject, CallbackObject, TrackCallbackObject, AlbumCallbackObject, PlaylistCallbackObject, ProcessingCallbackObject, IDs } from "@/types/callbacks";

export type DownloadType = "track" | "album" | "playlist";

// Type guards for callback objects
const isProcessingCallback = (obj: CallbackObject): obj is ProcessingCallbackObject => {
-  return "status" in obj && typeof obj.status === "string";
+  return "status" in obj && typeof (obj as ProcessingCallbackObject).status === "string" && (obj as any).name !== undefined;
};

const isTrackCallback = (obj: CallbackObject): obj is TrackCallbackObject => {
-  return "track" in obj && "status_info" in obj;
+  return (obj as any).track !== undefined && (obj as any).status_info !== undefined;
};

const isAlbumCallback = (obj: CallbackObject): obj is AlbumCallbackObject => {
-  return "album" in obj && "status_info" in obj;
+  return (obj as any).album !== undefined && (obj as any).status_info !== undefined;
};

const isPlaylistCallback = (obj: CallbackObject): obj is PlaylistCallbackObject => {
-  return "playlist" in obj && "status_info" in obj;
+  return (obj as any).playlist !== undefined && (obj as any).status_info !== undefined;
};

// Simplified queue item that works directly with callback objects
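
These guards narrow the CallbackObject union by probing for the discriminating property of each variant (track, album, playlist, or a bare status plus name). A small, hypothetical usage sketch of how a consumer might narrow a raw callback before reading type-specific fields:

// Hypothetical consumer of the type guards above; not part of the actual codebase.
function describeCallback(cb: CallbackObject): string {
  if (isTrackCallback(cb)) return `track: ${cb.status_info.status}`;
  if (isAlbumCallback(cb)) return `album: ${cb.status_info.status}`;
  if (isPlaylistCallback(cb)) return `playlist: ${cb.status_info.status}`;
  if (isProcessingCallback(cb)) return `processing: ${cb.status}`;
  return "unknown callback shape";
}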
@@ -27,6 +27,9 @@ export interface QueueItem {
  downloadType: DownloadType;
  spotifyId: string;

+  // Primary identifiers from callback (spotify/deezer/isrc/upc)
+  ids?: IDs;

  // Current callback data - this is the source of truth
  lastCallback?: CallbackObject;
@@ -43,6 +46,11 @@ export interface QueueItem {

// Status extraction utilities
export const getStatus = (item: QueueItem): string => {
+  // If user locally cancelled the task, reflect it without fabricating a callback
+  if (item.error === "Cancelled by user") {
+    return "cancelled";
+  }
+
  if (!item.lastCallback) {
    // Only log if this seems problematic (task has been around for a while)
    return "initializing";
@@ -57,32 +65,30 @@ export const getStatus = (item: QueueItem): string => {
    if (item.downloadType === "album" || item.downloadType === "playlist") {
      const currentTrack = item.lastCallback.current_track || 1;
      const totalTracks = item.lastCallback.total_tracks || 1;
-      const trackStatus = item.lastCallback.status_info.status;
+      const trackStatus = item.lastCallback.status_info.status as string;

      // If this is the last track and it's in a terminal state, the parent is done
      if (currentTrack >= totalTracks && ["done", "skipped", "error"].includes(trackStatus)) {
-        console.log(`🎵 Playlist/Album completed: ${item.name} (track ${currentTrack}/${totalTracks}, status: ${trackStatus})`);
        return "completed";
      }

      // If track is in terminal state but not the last track, parent is still downloading
      if (["done", "skipped", "error"].includes(trackStatus)) {
-        console.log(`🎵 Playlist/Album progress: ${item.name} (track ${currentTrack}/${totalTracks}, status: ${trackStatus}) - continuing...`);
        return "downloading";
      }

      // Track is actively being processed
      return "downloading";
    }

-    return item.lastCallback.status_info.status;
+    return item.lastCallback.status_info.status as string;
  }

  if (isAlbumCallback(item.lastCallback)) {
-    return item.lastCallback.status_info.status;
+    return item.lastCallback.status_info.status as string;
  }

  if (isPlaylistCallback(item.lastCallback)) {
-    return item.lastCallback.status_info.status;
+    return item.lastCallback.status_info.status as string;
  }

  console.warn(`getStatus: Unknown callback type for item ${item.id}:`, item.lastCallback);
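
The album/playlist branch above derives the parent's status purely from the latest per-track callback: a terminal track status on the last track completes the parent, while a terminal status on an earlier track means the parent is still working through the list. A worked sketch of just that rule, extracted for illustration:

// Parent-status rule from getStatus, as a standalone helper.
function parentStatus(currentTrack: number, totalTracks: number, trackStatus: string): string {
  const terminal = ["done", "skipped", "error"].includes(trackStatus);
  if (currentTrack >= totalTracks && terminal) return "completed"; // last track settled
  return "downloading"; // earlier track settled, or current track still in flight
}
// parentStatus(13, 13, "done")   -> "completed"
// parentStatus(6, 13, "skipped") -> "downloading"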
@@ -104,8 +110,8 @@ export const getProgress = (item: QueueItem): number | undefined => {
  // For individual tracks
  if (item.downloadType === "track" && isTrackCallback(item.lastCallback)) {
-    if (item.lastCallback.status_info.status === "real-time" && "progress" in item.lastCallback.status_info) {
-      return item.lastCallback.status_info.progress;
+    if ((item.lastCallback.status_info as any).status === "real-time" && "progress" in (item.lastCallback.status_info as any)) {
+      return (item.lastCallback.status_info as any).progress as number;
    }
    return undefined;
  }
@@ -115,8 +121,9 @@ export const getProgress = (item: QueueItem): number | undefined => {
    const callback = item.lastCallback;
    const currentTrack = callback.current_track || 1;
    const totalTracks = callback.total_tracks || 1;
-    const trackProgress = (callback.status_info.status === "real-time" && "progress" in callback.status_info)
-      ? callback.status_info.progress : 0;
+    const statusInfo: any = callback.status_info;
+    const trackProgress = (statusInfo.status === "real-time" && "progress" in statusInfo)
+      ? statusInfo.progress : 0;

    // Formula: ((completed tracks) + (current track progress / 100)) / total tracks * 100
    const completedTracks = currentTrack - 1;
View File

@@ -7,6 +7,7 @@ import { ProtectedRoute } from "@/components/auth/ProtectedRoute";
import { UserMenu } from "@/components/auth/UserMenu";
import { useContext, useState, useEffect } from "react";
import { getTheme, toggleTheme } from "@/lib/theme";
+import { useSettings } from "@/contexts/settings-context";

function ThemeToggle() {
  const [currentTheme, setCurrentTheme] = useState<'light' | 'dark' | 'system'>('system');
@@ -80,6 +81,8 @@ function ThemeToggle() {

function AppLayout() {
  const { toggleVisibility, totalTasks } = useContext(QueueContext) || {};
+  const { settings } = useSettings();
+  const watchEnabled = !!settings?.watch?.enabled;

  return (
    <div className="min-h-screen bg-gradient-to-br from-surface-secondary via-surface-muted to-surface-accent dark:from-surface-dark dark:via-surface-muted-dark dark:to-surface-secondary-dark text-content-primary dark:text-content-primary-dark flex flex-col overflow-hidden">
@@ -92,9 +95,11 @@ function AppLayout() {
            <div className="flex items-center gap-2">
              <ThemeToggle />
              <UserMenu />
+              {watchEnabled && (
              <Link to="/watchlist" className="p-2 rounded-full hover:bg-icon-button-hover dark:hover:bg-icon-button-hover-dark">
                <img src="/binoculars.svg" alt="Watchlist" className="w-6 h-6 logo" />
              </Link>
+              )}
              <Link to="/history" className="p-2 rounded-full hover:bg-icon-button-hover dark:hover:bg-icon-button-hover-dark">
                <img src="/history.svg" alt="History" className="w-6 h-6 logo" />
              </Link>
@@ -144,9 +149,11 @@ function AppLayout() {
        <Link to="/" className="p-3 rounded-full hover:bg-icon-button-hover dark:hover:bg-icon-button-hover-dark">
          <img src="/home.svg" alt="Home" className="w-6 h-6 logo" />
        </Link>
+        {watchEnabled && (
        <Link to="/watchlist" className="p-3 rounded-full hover:bg-icon-button-hover dark:hover:bg-icon-button-hover-dark">
          <img src="/binoculars.svg" alt="Watchlist" className="w-6 h-6 logo" />
        </Link>
+        )}
        <Link to="/history" className="p-3 rounded-full hover:bg-icon-button-hover dark:hover:bg-icon-button-hover-dark">
          <img src="/history.svg" alt="History" className="w-6 h-6 logo" />
        </Link>

View File

@@ -222,11 +222,22 @@ export interface SummaryObject {
  total_successful: number;
  total_skipped: number;
  total_failed: number;
+  // Optional metadata present in deezspot summaries (album/playlist and sometimes single-track)
+  service: "spotify" | "deezer";
+  quality: string; // e.g., "ogg", "flac"
+  bitrate: string; // e.g., "320k"
+  m3u_path?: string; // playlist convenience output
+  // Convenience fields that may appear for single-track flows
+  final_path?: string;
+  download_quality?: string; // e.g., "OGG_320"
}

export interface DoneObject extends BaseStatusObject {
  status: "done";
  summary?: SummaryObject;
+  // Convenience fields often present on done for tracks
+  final_path?: string;
+  download_quality?: string;
}

export type StatusInfo =
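
To make the new summary fields concrete, here is a hypothetical album summary in the shape this interface describes. Every value is illustrative, and Partial is used because fields outside this hunk are not visible here:

// Illustrative deezspot-style summary; values are made up, not real output.
const exampleSummary: Partial<SummaryObject> = {
  total_successful: 12,
  total_skipped: 0,
  total_failed: 1,
  service: "spotify",
  quality: "ogg",
  bitrate: "320k",
  m3u_path: "/downloads/playlists/example.m3u",
};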

View File

@@ -1 +0,0 @@

View File

@@ -1,633 +0,0 @@
import sqlite3
from pathlib import Path
import pytest
import json
# Override the autouse credentials fixture from conftest for this module
@pytest.fixture(scope="session", autouse=True)
def setup_credentials_for_tests():
# No-op to avoid external API calls; this shadows the session autouse fixture in conftest.py
yield
def _create_306_history_db(db_path: Path) -> None:
db_path.parent.mkdir(parents=True, exist_ok=True)
with sqlite3.connect(str(db_path)) as conn:
conn.executescript(
"""
CREATE TABLE IF NOT EXISTS download_history (
id INTEGER PRIMARY KEY AUTOINCREMENT,
download_type TEXT NOT NULL,
title TEXT NOT NULL,
artists TEXT,
timestamp REAL NOT NULL,
status TEXT NOT NULL,
service TEXT,
quality_format TEXT,
quality_bitrate TEXT,
total_tracks INTEGER,
successful_tracks INTEGER,
failed_tracks INTEGER,
skipped_tracks INTEGER,
children_table TEXT,
task_id TEXT,
external_ids TEXT,
metadata TEXT,
release_date TEXT,
genres TEXT,
images TEXT,
owner TEXT,
album_type TEXT,
duration_total_ms INTEGER,
explicit BOOLEAN
);
CREATE INDEX IF NOT EXISTS idx_download_history_timestamp ON download_history(timestamp);
CREATE INDEX IF NOT EXISTS idx_download_history_type_status ON download_history(download_type, status);
CREATE INDEX IF NOT EXISTS idx_download_history_task_id ON download_history(task_id);
CREATE UNIQUE INDEX IF NOT EXISTS uq_download_history_task_type_ids ON download_history(task_id, download_type, external_ids);
"""
)
# Insert rows that reference non-existent children tables
conn.execute(
"""
INSERT INTO download_history (
download_type, title, artists, timestamp, status, service,
quality_format, quality_bitrate, total_tracks, successful_tracks,
failed_tracks, skipped_tracks, children_table, task_id,
external_ids, metadata, release_date, genres, images, owner,
album_type, duration_total_ms, explicit
) VALUES (?, ?, ?, strftime('%s','now'), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
(
"album",
"Test Album",
"[]",
"completed",
"spotify",
"FLAC",
"1411kbps",
10,
8,
1,
1,
"album_test1",
"task-album-1",
"{}",
"{}",
"{}",
"[]",
"[]",
"{}",
"album",
123456,
0,
),
)
conn.execute(
"""
INSERT INTO download_history (
download_type, title, artists, timestamp, status, service,
quality_format, quality_bitrate, total_tracks, successful_tracks,
failed_tracks, skipped_tracks, children_table, task_id,
external_ids, metadata, release_date, genres, images, owner,
album_type, duration_total_ms, explicit
) VALUES (?, ?, ?, strftime('%s','now'), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
(
"playlist",
"Test Playlist",
"[]",
"partial",
"spotify",
"MP3",
"320kbps",
20,
15,
3,
2,
"playlist_test2",
"task-playlist-1",
"{}",
"{}",
"{}",
"[]",
"[]",
"{}",
"",
654321,
0,
),
)
# Create a legacy children table with too-few columns to test schema upgrade
conn.execute(
"CREATE TABLE IF NOT EXISTS album_legacy (id INTEGER PRIMARY KEY AUTOINCREMENT, title TEXT NOT NULL)"
)
# Create a fully-specified children table from docs and add rows
conn.execute(
"""
CREATE TABLE IF NOT EXISTS album_f9e8d7c6b5 (
id INTEGER PRIMARY KEY AUTOINCREMENT,
title TEXT NOT NULL,
artists TEXT,
album_title TEXT,
duration_ms INTEGER,
track_number INTEGER,
disc_number INTEGER,
explicit BOOLEAN,
status TEXT NOT NULL,
external_ids TEXT,
genres TEXT,
isrc TEXT,
timestamp REAL NOT NULL,
position INTEGER,
metadata TEXT
)
"""
)
conn.execute(
"""
INSERT INTO download_history (
download_type, title, artists, timestamp, status, service,
quality_format, quality_bitrate, total_tracks, successful_tracks,
failed_tracks, skipped_tracks, children_table, task_id,
external_ids, metadata, release_date, genres, images, owner,
album_type, duration_total_ms, explicit
) VALUES (?, ?, ?, strftime('%s','now'), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
(
"album",
"Random Access Memories",
"[\"Daft Punk\"]",
"partial",
"spotify",
"FLAC",
"1411",
13,
12,
1,
0,
"album_f9e8d7c6b5",
"celery-task-id-789",
"{\"spotify\": \"4m2880jivSbbyEGAKfITCa\"}",
"{\"callback_type\": \"album\"}",
"{\"year\": 2013, \"month\": 5, \"day\": 17}",
"[\"disco\", \"funk\"]",
"[{\"url\": \"https://i.scdn.co/image/...\"}]",
None,
"album",
4478293,
0
),
)
conn.executemany(
"""
INSERT INTO album_f9e8d7c6b5 (
title, artists, album_title, duration_ms, track_number, disc_number, explicit, status,
external_ids, genres, isrc, timestamp, position, metadata
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, strftime('%s','now'), ?, ?)
""",
[
(
"Get Lucky (feat. Pharrell Williams & Nile Rodgers)",
"[\"Daft Punk\", \"Pharrell Williams\", \"Nile Rodgers\"]",
"Random Access Memories",
369626,
8,
1,
0,
"completed",
"{\"spotify\": \"69kOkLUCdZlE8ApD28j1JG\", \"isrc\": \"GBUJH1300019\"}",
"[]",
"GBUJH1300019",
0,
"{\"album\": {...}, \"type\": \"track\"}",
),
(
"Lose Yourself to Dance (feat. Pharrell Williams)",
"[\"Daft Punk\", \"Pharrell Williams\"]",
"Random Access Memories",
353893,
6,
1,
0,
"failed",
"{\"spotify\": \"5L95vS64r8PAj5M8H1oYkm\", \"isrc\": \"GBUJH1300017\"}",
"[]",
"GBUJH1300017",
0,
"{\"album\": {...}, \"failure_reason\": \"Could not find matching track on Deezer.\"}",
),
]
)
def _create_306_watch_dbs(playlists_db: Path, artists_db: Path) -> None:
playlists_db.parent.mkdir(parents=True, exist_ok=True)
with sqlite3.connect(str(playlists_db)) as pconn:
pconn.executescript(
"""
CREATE TABLE IF NOT EXISTS watched_playlists (
spotify_id TEXT PRIMARY KEY,
name TEXT,
owner_id TEXT,
owner_name TEXT,
total_tracks INTEGER,
link TEXT,
snapshot_id TEXT,
last_checked INTEGER,
added_at INTEGER,
is_active INTEGER DEFAULT 1
);
"""
)
# Insert a sample watched playlist row (docs example)
pconn.execute(
"""
INSERT OR REPLACE INTO watched_playlists (
spotify_id, name, owner_id, owner_name, total_tracks, link, snapshot_id, last_checked, added_at, is_active
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
(
"37i9dQZF1DXcBWIGoYBM5M",
"Today's Top Hits",
"spotify",
"Spotify",
50,
"https://open.spotify.com/playlist/37i9dQZF1DXcBWIGoYBM5M",
"MTY3NzE4NjgwMCwwMDAwMDAwMDk1ODVmYjI5ZDY5MGUzN2Q4Y2U4OWY2YmY1ZDE4ZTAy",
1677187000,
1677186950,
1,
),
)
# Create a legacy/minimal playlist dynamic table to test schema upgrade
pconn.execute(
"CREATE TABLE IF NOT EXISTS playlist_legacy (spotify_track_id TEXT PRIMARY KEY, title TEXT)"
)
# Create a fully-specified playlist dynamic table (docs example) and add rows
pconn.execute(
"""
CREATE TABLE IF NOT EXISTS playlist_37i9dQZF1DXcBWIGoYBM5M (
spotify_track_id TEXT PRIMARY KEY,
title TEXT,
artist_names TEXT,
album_name TEXT,
album_artist_names TEXT,
track_number INTEGER,
album_spotify_id TEXT,
duration_ms INTEGER,
added_at_playlist TEXT,
added_to_db INTEGER,
is_present_in_spotify INTEGER,
last_seen_in_spotify INTEGER,
snapshot_id TEXT,
final_path TEXT
)
"""
)
pconn.executemany(
"""
INSERT OR REPLACE INTO playlist_37i9dQZF1DXcBWIGoYBM5M (
spotify_track_id, title, artist_names, album_name, album_artist_names, track_number, album_spotify_id,
duration_ms, added_at_playlist, added_to_db, is_present_in_spotify, last_seen_in_spotify, snapshot_id, final_path
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
[
(
"4k6Uh1HXdhtusDW5y80vNN",
"As It Was",
"Harry Styles",
"Harry's House",
"Harry Styles",
4,
"5r36AJ6VOJtp00oxSkNaAO",
167303,
"2023-02-20T10:00:00Z",
1677186980,
1,
1677187000,
"MTY3NzE4NjgwMCwwMDAwMDAwMDk1ODVmYjI5ZDY5MGUzN2Q4Y2U4OWY2YmY1ZDE4ZTAy",
"/downloads/music/Harry Styles/Harry's House/04 - As It Was.flac",
),
(
"5ww2BF9slyYgAno5EAsoOJ",
"Flowers",
"Miley Cyrus",
"Endless Summer Vacation",
"Miley Cyrus",
1,
"1lw0K2sIKi84gav3e4pG3c",
194952,
"2023-02-23T12:00:00Z",
1677186995,
1,
1677187000,
"MTY3NzE4NjgwMCwwMDAwMDAwMDk1ODVmYjI5ZDY5MGUzN2Q4Y2U4OWY2YmY1ZDE4ZTAy",
None,
),
]
)
with sqlite3.connect(str(artists_db)) as aconn:
aconn.executescript(
"""
CREATE TABLE IF NOT EXISTS watched_artists (
spotify_id TEXT PRIMARY KEY,
name TEXT,
link TEXT,
total_albums_on_spotify INTEGER,
last_checked INTEGER,
added_at INTEGER,
is_active INTEGER DEFAULT 1,
genres TEXT,
popularity INTEGER,
image_url TEXT
);
"""
)
# Insert a sample watched artist row (docs example)
aconn.execute(
"""
INSERT OR REPLACE INTO watched_artists (
spotify_id, name, link, total_albums_on_spotify, last_checked, added_at, is_active, genres, popularity, image_url
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
(
"4oLeXFyACqeem2VImYeBFe",
"Madeon",
"https://open.spotify.com/artist/4oLeXFyACqeem2VImYeBFe",
45,
1677188000,
1677187900,
1,
"electro house, filter house, french house",
65,
"https://i.scdn.co/image/ab6761610000e5eb...",
),
)
# Create a legacy/minimal artist dynamic table to test schema upgrade
aconn.execute(
"CREATE TABLE IF NOT EXISTS artist_legacy (album_spotify_id TEXT PRIMARY KEY, name TEXT)"
)
# Create a fully-specified artist dynamic table (docs example) and add rows
aconn.execute(
"""
CREATE TABLE IF NOT EXISTS artist_4oLeXFyACqeem2VImYeBFe (
album_spotify_id TEXT PRIMARY KEY,
artist_spotify_id TEXT,
name TEXT,
album_group TEXT,
album_type TEXT,
release_date TEXT,
release_date_precision TEXT,
total_tracks INTEGER,
link TEXT,
image_url TEXT,
added_to_db INTEGER,
last_seen_on_spotify INTEGER,
download_task_id TEXT,
download_status INTEGER,
is_fully_downloaded_managed_by_app INTEGER
)
"""
)
aconn.executemany(
"""
INSERT OR REPLACE INTO artist_4oLeXFyACqeem2VImYeBFe (
album_spotify_id, artist_spotify_id, name, album_group, album_type, release_date, release_date_precision,
total_tracks, link, image_url, added_to_db, last_seen_on_spotify, download_task_id, download_status, is_fully_downloaded_managed_by_app
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
[
(
"2GWMnf2ltOQd2v2T62a2m8",
"4oLeXFyACqeem2VImYeBFe",
"Good Faith",
"album",
"album",
"2019-11-15",
"day",
10,
"https://open.spotify.com/album/2GWMnf2ltOQd2v2T62a2m8",
"https://i.scdn.co/image/ab67616d0000b273...",
1677187950,
1677188000,
"celery-task-id-123",
2,
1,
),
(
"2smfe2S0AVaxH2I1a5p55n",
"4oLeXFyACqeem2VImYeBFe",
"Gonna Be Good",
"single",
"single",
"2023-01-19",
"day",
1,
"https://open.spotify.com/album/2smfe2S0AVaxH2I1a5p55n",
"https://i.scdn.co/image/ab67616d0000b273...",
1677187960,
1677188000,
"celery-task-id-456",
1,
0,
),
]
)
def _create_306_accounts(creds_dir: Path, accounts_db: Path) -> None:
creds_dir.mkdir(parents=True, exist_ok=True)
with sqlite3.connect(str(accounts_db)) as conn:
conn.executescript(
"""
CREATE TABLE IF NOT EXISTS spotify (
name TEXT PRIMARY KEY,
region TEXT,
created_at REAL,
updated_at REAL
);
CREATE TABLE IF NOT EXISTS deezer (
name TEXT PRIMARY KEY,
arl TEXT,
region TEXT,
created_at REAL,
updated_at REAL
);
"""
)
conn.execute(
"INSERT OR REPLACE INTO spotify (name, region, created_at, updated_at) VALUES (?, ?, ?, ?)",
("my_main_spotify", "US", 1677190000.0, 1677190000.0),
)
conn.execute(
"INSERT OR REPLACE INTO deezer (name, arl, region, created_at, updated_at) VALUES (?, ?, ?, ?, ?)",
("my_hifi_deezer", "a1b2c3d4e5f6a1b2c3d4e5f6...", "FR", 1677190100.0, 1677190100.0),
)
# Pre-create creds filesystem
search_json = creds_dir / "search.json"
if not search_json.exists():
search_json.write_text('{"client_id":"your_global_spotify_client_id","client_secret":"your_global_spotify_client_secret"}\n', encoding="utf-8")
blobs_dir = creds_dir / "blobs" / "my_main_spotify"
blobs_dir.mkdir(parents=True, exist_ok=True)
creds_blob = blobs_dir / "credentials.json"
if not creds_blob.exists():
creds_blob.write_text(
'{"version":"v1","access_token":"...","expires_at":1677193600,"refresh_token":"...","scope":"user-read-private user-read-email playlist-read-private"}\n',
encoding="utf-8",
)
def _get_columns(db_path: Path, table: str) -> set[str]:
with sqlite3.connect(str(db_path)) as conn:
cur = conn.execute(f"PRAGMA table_info({table})")
return {row[1] for row in cur.fetchall()}
def _get_count(db_path: Path, table: str) -> int:
with sqlite3.connect(str(db_path)) as conn:
cur = conn.execute(f"SELECT COUNT(*) FROM {table}")
return cur.fetchone()[0]
def test_migration_children_tables_created_and_upgraded(tmp_path: Path, monkeypatch: pytest.MonkeyPatch):
# Arrange temp paths
data_dir = tmp_path / "data"
history_db = data_dir / "history" / "download_history.db"
playlists_db = data_dir / "watch" / "playlists.db"
artists_db = data_dir / "watch" / "artists.db"
creds_dir = data_dir / "creds"
accounts_db = creds_dir / "accounts.db"
blobs_dir = creds_dir / "blobs"
search_json = creds_dir / "search.json"
# Create 3.0.6 base schemas and sample data (full simulation)
_create_306_history_db(history_db)
_create_306_watch_dbs(playlists_db, artists_db)
_create_306_accounts(creds_dir, accounts_db)
# Point the migration runner to our temp DBs
from routes.migrations import runner
monkeypatch.setattr(runner, "DATA_DIR", data_dir)
monkeypatch.setattr(runner, "HISTORY_DB", history_db)
monkeypatch.setattr(runner, "WATCH_DIR", data_dir / "watch")
monkeypatch.setattr(runner, "PLAYLISTS_DB", playlists_db)
monkeypatch.setattr(runner, "ARTISTS_DB", artists_db)
monkeypatch.setattr(runner, "CREDS_DIR", creds_dir)
monkeypatch.setattr(runner, "ACCOUNTS_DB", accounts_db)
monkeypatch.setattr(runner, "BLOBS_DIR", blobs_dir)
monkeypatch.setattr(runner, "SEARCH_JSON", search_json)
# Act: run migrations
runner.run_migrations_if_needed()
# Run twice to ensure idempotency
runner.run_migrations_if_needed()
# Assert: referenced children tables exist with expected columns
expected_children_cols = {
"id",
"title",
"artists",
"album_title",
"duration_ms",
"track_number",
"disc_number",
"explicit",
"status",
"external_ids",
"genres",
"isrc",
"timestamp",
"position",
"metadata",
}
assert _get_columns(history_db, "album_test1").issuperset(expected_children_cols)
assert _get_columns(history_db, "playlist_test2").issuperset(expected_children_cols)
# Legacy table upgraded
assert _get_columns(history_db, "album_legacy").issuperset(expected_children_cols)
# Pre-existing children table preserved and correct
assert _get_columns(history_db, "album_f9e8d7c6b5").issuperset(expected_children_cols)
assert _get_count(history_db, "album_f9e8d7c6b5") == 2
# Assert: accounts DB created/preserved with expected tables and columns
assert accounts_db.exists()
spotify_cols = _get_columns(accounts_db, "spotify")
deezer_cols = _get_columns(accounts_db, "deezer")
assert {"name", "region", "created_at", "updated_at"}.issubset(spotify_cols)
assert {"name", "arl", "region", "created_at", "updated_at"}.issubset(deezer_cols)
# Assert: creds filesystem and pre-existing blob preserved
assert blobs_dir.exists() and blobs_dir.is_dir()
assert search_json.exists()
data = json.loads(search_json.read_text())
assert set(data.keys()) == {"client_id", "client_secret"}
assert (blobs_dir / "my_main_spotify" / "credentials.json").exists()
# Assert: watch playlists core and dynamic tables upgraded to/at 3.1.2 schema
watched_playlists_cols = _get_columns(playlists_db, "watched_playlists")
assert {
"spotify_id",
"name",
"owner_id",
"owner_name",
"total_tracks",
"link",
"snapshot_id",
"last_checked",
"added_at",
"is_active",
}.issubset(watched_playlists_cols)
playlist_dynamic_expected = {
"spotify_track_id",
"title",
"artist_names",
"album_name",
"album_artist_names",
"track_number",
"album_spotify_id",
"duration_ms",
"added_at_playlist",
"added_to_db",
"is_present_in_spotify",
"last_seen_in_spotify",
"snapshot_id",
"final_path",
}
assert _get_columns(playlists_db, "playlist_legacy").issuperset(playlist_dynamic_expected)
assert _get_columns(playlists_db, "playlist_37i9dQZF1DXcBWIGoYBM5M").issuperset(playlist_dynamic_expected)
assert _get_count(playlists_db, "playlist_37i9dQZF1DXcBWIGoYBM5M") == 2
# Assert: watch artists core and dynamic tables upgraded to/at 3.1.2 schema
watched_artists_cols = _get_columns(artists_db, "watched_artists")
assert {
"spotify_id",
"name",
"link",
"total_albums_on_spotify",
"last_checked",
"added_at",
"is_active",
"genres",
"popularity",
"image_url",
}.issubset(watched_artists_cols)
artist_dynamic_expected = {
"album_spotify_id",
"artist_spotify_id",
"name",
"album_group",
"album_type",
"release_date",
"release_date_precision",
"total_tracks",
"link",
"image_url",
"added_to_db",
"last_seen_on_spotify",
"download_task_id",
"download_status",
"is_fully_downloaded_managed_by_app",
}
assert _get_columns(artists_db, "artist_legacy").issuperset(artist_dynamic_expected)
assert _get_columns(artists_db, "artist_4oLeXFyACqeem2VImYeBFe").issuperset(artist_dynamic_expected)
assert _get_count(artists_db, "artist_4oLeXFyACqeem2VImYeBFe") == 2

View File

@@ -1,65 +0,0 @@
import sqlite3
from pathlib import Path
import pytest
from routes.migrations.v3_1_0 import MigrationV3_1_0
# Override the autouse credentials fixture from conftest for this module
@pytest.fixture(scope="session", autouse=True)
def setup_credentials_for_tests():
# No-op to avoid external API calls
yield
def _create_310_watch_artists_db(db_path: Path) -> None:
db_path.parent.mkdir(parents=True, exist_ok=True)
with sqlite3.connect(str(db_path)) as conn:
conn.executescript(
"""
CREATE TABLE watched_artists (
spotify_id TEXT PRIMARY KEY,
name TEXT
);
CREATE TABLE "artist_a1b2c3" (
album_spotify_id TEXT PRIMARY KEY,
artist_spotify_id TEXT,
name TEXT,
album_type TEXT,
release_date TEXT,
total_tracks INTEGER,
link TEXT,
image_url TEXT,
added_to_db INTEGER,
last_seen_on_spotify INTEGER
);
"""
)
conn.execute("INSERT INTO watched_artists (spotify_id) VALUES (?)", ('a1b2c3',))
def test_watch_artists_migration(tmp_path):
# 1. Setup mock v3.1.0 database
db_path = tmp_path / "artists.db"
_create_310_watch_artists_db(db_path)
# 2. Run the migration
migration = MigrationV3_1_0()
with sqlite3.connect(db_path) as conn:
# Sanity check before migration
cur = conn.execute('PRAGMA table_info("artist_a1b2c3")')
columns_before = {row[1] for row in cur.fetchall()}
assert 'download_status' not in columns_before
# Apply migration
migration.update_watch_artists(conn)
# 3. Assert migration was successful
cur = conn.execute('PRAGMA table_info("artist_a1b2c3")')
columns_after = {row[1] for row in cur.fetchall()}
expected_columns = migration.ARTIST_ALBUMS_EXPECTED_COLUMNS.keys()
assert set(expected_columns).issubset(columns_after)

View File

@@ -1,135 +0,0 @@
import sqlite3
import unittest
from pathlib import Path
from tempfile import mkdtemp
from shutil import rmtree
import pytest
from routes.migrations.v3_1_1 import MigrationV3_1_1
# Override the autouse credentials fixture from conftest for this module
@pytest.fixture(scope="session", autouse=True)
def setup_credentials_for_tests():
# No-op to avoid external API calls; this shadows the session autouse fixture in conftest.py
yield
class TestMigrationV3_1_1(unittest.TestCase):
"""
Tests the dummy migration from 3.1.1 to 3.1.2, ensuring no changes are made.
"""
def setUp(self):
self.temp_dir = Path(mkdtemp())
self.history_db_path = self.temp_dir / "history" / "download_history.db"
self.artists_db_path = self.temp_dir / "watch" / "artists.db"
self.playlists_db_path = self.temp_dir / "watch" / "playlists.db"
self.accounts_db_path = self.temp_dir / "creds" / "accounts.db"
self._create_mock_databases()
def tearDown(self):
rmtree(self.temp_dir)
def _get_db_schema(self, db_path: Path) -> dict:
"""Helper to get the schema of a database."""
schema = {}
with sqlite3.connect(db_path) as conn:
cursor = conn.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = [row[0] for row in cursor.fetchall() if not row[0].startswith("sqlite_")]
for table_name in tables:
info_cursor = conn.execute(f'PRAGMA table_info("{table_name}")')
schema[table_name] = {row[1] for row in info_cursor.fetchall()}
return schema
def _create_mock_databases(self):
"""Creates a set of mock databases with the 3.1.1 schema."""
# History DB
self.history_db_path.parent.mkdir(parents=True, exist_ok=True)
with sqlite3.connect(self.history_db_path) as conn:
conn.executescript(
"""
CREATE TABLE download_history (
id INTEGER PRIMARY KEY, download_type TEXT, title TEXT, artists TEXT,
timestamp REAL, status TEXT, service TEXT, quality_format TEXT,
quality_bitrate TEXT, total_tracks INTEGER, successful_tracks INTEGER,
failed_tracks INTEGER, skipped_tracks INTEGER, children_table TEXT,
task_id TEXT, external_ids TEXT, metadata TEXT, release_date TEXT,
genres TEXT, images TEXT, owner TEXT, album_type TEXT,
duration_total_ms INTEGER, explicit BOOLEAN
);
CREATE TABLE playlist_p1l2a3 (
id INTEGER PRIMARY KEY, title TEXT, artists TEXT, album_title TEXT,
duration_ms INTEGER, track_number INTEGER, disc_number INTEGER,
explicit BOOLEAN, status TEXT, external_ids TEXT, genres TEXT,
isrc TEXT, timestamp REAL, position INTEGER, metadata TEXT
);
"""
)
# Watch Artists DB
self.artists_db_path.parent.mkdir(parents=True, exist_ok=True)
with sqlite3.connect(self.artists_db_path) as conn:
conn.executescript(
"""
CREATE TABLE watched_artists (id TEXT PRIMARY KEY, children_table TEXT);
INSERT INTO watched_artists (id, children_table) VALUES ('a1b2c3d4', 'artist_a1b2c3d4');
CREATE TABLE artist_a1b2c3d4 (
id TEXT PRIMARY KEY, title TEXT, artists TEXT, album_type TEXT,
release_date TEXT, total_tracks INTEGER, external_ids TEXT,
images TEXT, album_group TEXT, release_date_precision TEXT,
download_task_id TEXT, download_status TEXT,
is_fully_downloaded_managed_by_app BOOLEAN
);
"""
)
# Watch Playlists DB
self.playlists_db_path.parent.mkdir(parents=True, exist_ok=True)
with sqlite3.connect(self.playlists_db_path) as conn:
conn.executescript(
"""
CREATE TABLE watched_playlists (id TEXT PRIMARY KEY, children_table TEXT);
CREATE TABLE playlist_p1l2a3 (id TEXT PRIMARY KEY, title TEXT);
"""
)
# Accounts DB
self.accounts_db_path.parent.mkdir(parents=True, exist_ok=True)
with sqlite3.connect(self.accounts_db_path) as conn:
conn.execute("CREATE TABLE accounts (id TEXT PRIMARY KEY, service TEXT, details TEXT);")
def test_migration_leaves_schema_unchanged(self):
"""Asserts that the dummy migration makes no changes to any database."""
# Get initial schemas
initial_schemas = {
"history": self._get_db_schema(self.history_db_path),
"artists": self._get_db_schema(self.artists_db_path),
"playlists": self._get_db_schema(self.playlists_db_path),
"accounts": self._get_db_schema(self.accounts_db_path),
}
# Run the dummy migration
migration = MigrationV3_1_1()
with sqlite3.connect(self.history_db_path) as conn:
migration.update_history(conn)
with sqlite3.connect(self.artists_db_path) as conn:
migration.update_watch_artists(conn)
with sqlite3.connect(self.playlists_db_path) as conn:
migration.update_watch_playlists(conn)
with sqlite3.connect(self.accounts_db_path) as conn:
migration.update_accounts(conn)
# Get final schemas
final_schemas = {
"history": self._get_db_schema(self.history_db_path),
"artists": self._get_db_schema(self.artists_db_path),
"playlists": self._get_db_schema(self.playlists_db_path),
"accounts": self._get_db_schema(self.accounts_db_path),
}
# Assert schemas are identical
self.assertEqual(initial_schemas, final_schemas)
if __name__ == '__main__':
unittest.main()