Merge pull request #299 from spotizerr-dev/dev

3.2.1
This commit is contained in:
Spotizerr
2025-08-21 19:57:35 -05:00
committed by GitHub
21 changed files with 1076 additions and 1210 deletions

View File

@@ -1,5 +1,12 @@
# Contributing guidelines
## Commit format
- All pull requests must be made to `dev` branch
- Use [conventional commit messages](https://www.conventionalcommits.org/en/v1.0.0/). E.g. `feat: add feature` or `fix: resolve issue #69420`
## Feature philosophy
- When implementing a feature related to downloading, follow the rule of choice: every download must come from an active decision made by the user (e.g. clicking a download button, or choosing to fetch a whole artist's discography). This rules out features like recommendation algorithms, auto-generated playlists, etc.

View File

@@ -19,16 +19,15 @@ WORKDIR /app
# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
gosu \
git \
ffmpeg \
ffmpeg gosu \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/
RUN uv pip install --system -r requirements.txt
# Copy application code (excluding UI source and TS source)
COPY . .

app.py
View File

@@ -25,6 +25,7 @@ except Exception as e:
logging.getLogger(__name__).error(
f"Database migration step failed early in startup: {e}", exc_info=True
)
sys.exit(1)
# Import route routers (to be created)
from routes.auth.credentials import router as credentials_router
@@ -47,6 +48,9 @@ from routes.utils.celery_config import REDIS_URL
from routes.auth import AUTH_ENABLED
from routes.auth.middleware import AuthMiddleware
# Import watch manager controls (start/stop) without triggering side effects
from routes.utils.watch.manager import start_watch_manager, stop_watch_manager
# Import and initialize routes (this will start the watch manager)
@@ -166,9 +170,25 @@ async def lifespan(app: FastAPI):
except Exception as e:
logging.error(f"Failed to start Celery workers: {e}")
# Start Watch Manager after Celery is up
try:
start_watch_manager()
logging.info("Watch Manager initialized and registered for shutdown.")
except Exception as e:
logging.error(
f"Could not start Watch Manager: {e}. Watch functionality will be disabled.",
exc_info=True,
)
yield
# Shutdown
try:
stop_watch_manager()
logging.info("Watch Manager stopped")
except Exception as e:
logging.error(f"Error stopping Watch Manager: {e}")
try:
celery_manager.stop()
logging.info("Celery workers stopped")

View File

@@ -1,36 +1,7 @@
import logging
import atexit
# Configure basic logging for the application if not already configured
# This is a good place for it if routes are a central part of your app structure.
logging.basicConfig(
level=logging.INFO, format="%(message)s"
)
# This remains safe to execute on import
logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger(__name__)
# Run DB migrations early so other modules see expected schemas
try:
from routes.migrations import run_migrations_if_needed
run_migrations_if_needed()
logger.info("Database migrations executed (if needed).")
except Exception as e:
logger.error(f"Database migration step failed: {e}", exc_info=True)
try:
from routes.utils.watch.manager import start_watch_manager, stop_watch_manager
# Start the playlist watch manager when the application/blueprint is initialized
start_watch_manager()
# Register the stop function to be called on application exit
atexit.register(stop_watch_manager)
logger.info("Playlist Watch Manager initialized and registered for shutdown.")
except ImportError as e:
logger.error(
f"Could not import or start Playlist Watch Manager: {e}. Playlist watching will be disabled."
)
except Exception as e:
logger.error(
f"An unexpected error occurred during Playlist Watch Manager setup: {e}",
exc_info=True,
)

View File

@@ -129,6 +129,20 @@ async def handle_create_credential(service: str, name: str, request: Request, cu
# For Deezer, it expects 'arl' and 'region'
# Validation is handled within create_credential utility function
result = create_credential(service, name, data)
# set as active Spotify account if none is set
if service == "spotify":
try:
from routes.utils.celery_config import get_config_params as get_main_config_params
from routes.system.config import save_config
config = get_main_config_params()
# The field is likely "spotify" (as used in frontend)
if not config.get("spotify"):
config["spotify"] = name
save_config(config)
except Exception as e:
logger.warning(f"Could not set new Spotify account '{name}' as active: {e}")
return {
"message": f"Credential for '{name}' ({service}) created successfully.",
"details": result,

View File

@@ -3,10 +3,7 @@ import sqlite3
from pathlib import Path
from typing import Optional
from .v3_0_6 import MigrationV3_0_6
from .v3_1_0 import MigrationV3_1_0
from .v3_1_1 import MigrationV3_1_1
from .v3_1_2 import MigrationV3_1_2
from .v3_2_0 import MigrationV3_2_0
logger = logging.getLogger(__name__)
@@ -41,7 +38,7 @@ CHILDREN_EXPECTED_COLUMNS: dict[str, str] = {
"metadata": "TEXT",
}
# 3.1.2 expected schemas for Watch DBs (kept here to avoid importing modules with side-effects)
# 3.2.0 expected schemas for Watch DBs (kept here to avoid importing modules with side-effects)
EXPECTED_WATCHED_PLAYLISTS_COLUMNS: dict[str, str] = {
"spotify_id": "TEXT PRIMARY KEY",
"name": "TEXT",
@@ -103,10 +100,7 @@ EXPECTED_ARTIST_ALBUMS_COLUMNS: dict[str, str] = {
"is_fully_downloaded_managed_by_app": "INTEGER DEFAULT 0",
}
m306 = MigrationV3_0_6()
m310 = MigrationV3_1_0()
m311 = MigrationV3_1_1()
m312 = MigrationV3_1_2()
m320 = MigrationV3_2_0()
def _safe_connect(path: Path) -> Optional[sqlite3.Connection]:
@@ -184,60 +178,53 @@ def _create_or_update_children_table(conn: sqlite3.Connection, table_name: str)
)
def _update_children_tables_for_history(conn: sqlite3.Connection) -> None:
# --- Helper to validate instance is at least 3.1.2 on history DB ---
def _history_children_tables(conn: sqlite3.Connection) -> list[str]:
tables: set[str] = set()
try:
try:
cur = conn.execute(
"SELECT DISTINCT children_table FROM download_history WHERE children_table IS NOT NULL AND TRIM(children_table) != ''"
)
for row in cur.fetchall():
table_name = row[0]
if not table_name:
continue
_create_or_update_children_table(conn, table_name)
except sqlite3.Error as e:
logger.warning(
f"Failed to scan referenced children tables from main history: {e}"
)
try:
cur = conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND (name LIKE 'album_%' OR name LIKE 'playlist_%') AND name != 'download_history'"
)
for row in cur.fetchall():
table_name = row[0]
_create_or_update_children_table(conn, table_name)
except sqlite3.Error as e:
logger.warning(f"Failed to scan legacy children tables in history DB: {e}")
logger.info("Children history tables migration ensured")
except Exception:
logger.error("Failed migrating children history tables", exc_info=True)
def _ensure_creds_filesystem() -> None:
try:
BLOBS_DIR.mkdir(parents=True, exist_ok=True)
if not SEARCH_JSON.exists():
SEARCH_JSON.write_text(
'{ "client_id": "", "client_secret": "" }\n', encoding="utf-8"
)
logger.info(f"Created default global Spotify creds file at {SEARCH_JSON}")
except Exception:
logger.error(
"Failed to ensure credentials filesystem (blobs/search.json)", exc_info=True
cur = conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND (name LIKE 'album_%' OR name LIKE 'playlist_%') AND name != 'download_history'"
)
for row in cur.fetchall():
if row and row[0]:
tables.add(row[0])
except sqlite3.Error as e:
logger.warning(f"Failed to scan sqlite_master for children tables: {e}")
try:
cur = conn.execute(
"SELECT DISTINCT children_table FROM download_history WHERE children_table IS NOT NULL AND TRIM(children_table) != ''"
)
for row in cur.fetchall():
t = row[0]
if t:
tables.add(t)
except sqlite3.Error as e:
logger.warning(f"Failed to scan download_history for children tables: {e}")
return sorted(tables)
def _apply_versioned_updates(
conn: sqlite3.Connection, c_base, u_base, post_update=None
) -> None:
if not c_base(conn):
u_base(conn)
if post_update:
post_update(conn)
def _is_history_at_least_3_2_0(conn: sqlite3.Connection) -> bool:
required_cols = {"service", "quality_format", "quality_bitrate"}
tables = _history_children_tables(conn)
if not tables:
# Nothing to migrate implies OK
return True
for t in tables:
try:
cur = conn.execute(f"PRAGMA table_info({t})")
cols = {row[1] for row in cur.fetchall()}
if not required_cols.issubset(cols):
return False
except sqlite3.OperationalError:
return False
return True
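A quick way to sanity-check this guard against a scratch database (calling the private helper directly; in-memory DB and table names are illustrative):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE download_history (id INTEGER PRIMARY KEY, children_table TEXT)")
conn.execute("CREATE TABLE album_demo (id INTEGER PRIMARY KEY, title TEXT)")
conn.execute("INSERT INTO download_history (children_table) VALUES ('album_demo')")

assert _is_history_at_least_3_2_0(conn) is False  # quality columns missing

for col in ("service", "quality_format", "quality_bitrate"):
    conn.execute(f"ALTER TABLE album_demo ADD COLUMN {col} TEXT")

assert _is_history_at_least_3_2_0(conn) is True
```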
# --- 3.1.2 upgrade helpers for Watch DBs ---
# --- 3.2.0 verification helpers for Watch DBs ---
def _update_watch_playlists_db(conn: sqlite3.Connection) -> None:
@@ -298,10 +285,10 @@ def _update_watch_playlists_db(conn: sqlite3.Connection) -> None:
EXPECTED_PLAYLIST_TRACKS_COLUMNS,
f"playlist tracks ({table_name})",
)
logger.info("Upgraded watch playlists DB to 3.1.2 schema")
logger.info("Upgraded watch playlists DB to 3.2.0 base schema")
except Exception:
logger.error(
"Failed to upgrade watch playlists DB to 3.1.2 schema", exc_info=True
"Failed to upgrade watch playlists DB to 3.2.0 base schema", exc_info=True
)
@@ -361,10 +348,24 @@ def _update_watch_artists_db(conn: sqlite3.Connection) -> None:
EXPECTED_ARTIST_ALBUMS_COLUMNS,
f"artist albums ({table_name})",
)
logger.info("Upgraded watch artists DB to 3.1.2 schema")
logger.info("Upgraded watch artists DB to 3.2.0 base schema")
except Exception:
logger.error(
"Failed to upgrade watch artists DB to 3.1.2 schema", exc_info=True
"Failed to upgrade watch artists DB to 3.2.0 base schema", exc_info=True
)
def _ensure_creds_filesystem() -> None:
try:
BLOBS_DIR.mkdir(parents=True, exist_ok=True)
if not SEARCH_JSON.exists():
SEARCH_JSON.write_text(
'{ "client_id": "", "client_secret": "" }\n', encoding="utf-8"
)
logger.info(f"Created default global Spotify creds file at {SEARCH_JSON}")
except Exception:
logger.error(
"Failed to ensure credentials filesystem (blobs/search.json)", exc_info=True
)
@@ -374,75 +375,42 @@ def run_migrations_if_needed():
return
try:
# History DB
with _safe_connect(HISTORY_DB) as conn:
if conn:
_apply_versioned_updates(
conn,
m306.check_history,
m306.update_history,
post_update=_update_children_tables_for_history,
# Require instance to be at least 3.2.0 on history DB; otherwise abort
with _safe_connect(HISTORY_DB) as history_conn:
if history_conn and not _is_history_at_least_3_2_0(history_conn):
logger.error(
"Instance is not at schema version 3.2.0. Please upgrade to 3.2.0 before applying 3.2.1."
)
raise RuntimeError(
"Instance is not at schema version 3.2.0. Please upgrade to 3.2.0 before applying 3.2.1."
)
_apply_versioned_updates(conn, m311.check_history, m311.update_history)
_apply_versioned_updates(conn, m312.check_history, m312.update_history)
conn.commit()
# Watch playlists DB
with _safe_connect(PLAYLISTS_DB) as conn:
if conn:
_apply_versioned_updates(
conn,
m306.check_watch_playlists,
m306.update_watch_playlists,
)
_apply_versioned_updates(
conn,
m311.check_watch_playlists,
m311.update_watch_playlists,
)
_apply_versioned_updates(
conn,
m312.check_watch_playlists,
m312.update_watch_playlists,
)
_update_watch_playlists_db(conn)
# Apply 3.2.0 additions (batch progress columns)
if not m320.check_watch_playlists(conn):
m320.update_watch_playlists(conn)
conn.commit()
# Watch artists DB
# Watch artists DB (if exists)
if ARTISTS_DB.exists():
with _safe_connect(ARTISTS_DB) as conn:
if conn:
_apply_versioned_updates(
conn, m306.check_watch_artists, m306.update_watch_artists
)
_apply_versioned_updates(
conn, m310.check_watch_artists, m310.update_watch_artists
)
_apply_versioned_updates(
conn, m311.check_watch_artists, m311.update_watch_artists
)
_apply_versioned_updates(
conn, m312.check_watch_artists, m312.update_watch_artists
)
_update_watch_artists_db(conn)
if not m320.check_watch_artists(conn):
m320.update_watch_artists(conn)
conn.commit()
# Accounts DB
# Accounts DB (no changes for this migration path)
with _safe_connect(ACCOUNTS_DB) as conn:
if conn:
_apply_versioned_updates(
conn, m306.check_accounts, m306.update_accounts
)
_apply_versioned_updates(
conn, m311.check_accounts, m311.update_accounts
)
_apply_versioned_updates(
conn, m312.check_accounts, m312.update_accounts
)
conn.commit()
except Exception as e:
logger.error("Error during migration: %s", e, exc_info=True)
raise
else:
_ensure_creds_filesystem()
logger.info("Database migrations check completed")
logger.info("Database migrations check completed (3.2.0 -> 3.2.1 path)")

View File

@@ -1,201 +0,0 @@
import sqlite3
class MigrationV3_0_6:
HISTORY_SQL = """
CREATE TABLE IF NOT EXISTS download_history (
id INTEGER PRIMARY KEY AUTOINCREMENT,
download_type TEXT NOT NULL,
title TEXT NOT NULL,
artists TEXT,
timestamp REAL NOT NULL,
status TEXT NOT NULL,
service TEXT,
quality_format TEXT,
quality_bitrate TEXT,
total_tracks INTEGER,
successful_tracks INTEGER,
failed_tracks INTEGER,
skipped_tracks INTEGER,
children_table TEXT,
task_id TEXT,
external_ids TEXT,
metadata TEXT,
release_date TEXT,
genres TEXT,
images TEXT,
owner TEXT,
album_type TEXT,
duration_total_ms INTEGER,
explicit BOOLEAN
);
CREATE INDEX IF NOT EXISTS idx_download_history_timestamp ON download_history(timestamp);
CREATE INDEX IF NOT EXISTS idx_download_history_type_status ON download_history(download_type, status);
CREATE INDEX IF NOT EXISTS idx_download_history_task_id ON download_history(task_id);
CREATE UNIQUE INDEX IF NOT EXISTS uq_download_history_task_type_ids ON download_history(task_id, download_type, external_ids);
"""
WATCH_PLAYLISTS_SQL = """
CREATE TABLE IF NOT EXISTS watched_playlists (
spotify_id TEXT PRIMARY KEY,
name TEXT,
owner_id TEXT,
owner_name TEXT,
total_tracks INTEGER,
link TEXT,
snapshot_id TEXT,
last_checked INTEGER,
added_at INTEGER,
is_active INTEGER DEFAULT 1
);
"""
WATCH_ARTISTS_SQL = """
CREATE TABLE IF NOT EXISTS watched_artists (
spotify_id TEXT PRIMARY KEY,
name TEXT,
link TEXT,
total_albums_on_spotify INTEGER,
last_checked INTEGER,
added_at INTEGER,
is_active INTEGER DEFAULT 1,
genres TEXT,
popularity INTEGER,
image_url TEXT
);
"""
ACCOUNTS_SPOTIFY_SQL = """
CREATE TABLE IF NOT EXISTS spotify (
name TEXT PRIMARY KEY,
region TEXT,
created_at REAL,
updated_at REAL
);
"""
ACCOUNTS_DEEZER_SQL = """
CREATE TABLE IF NOT EXISTS deezer (
name TEXT PRIMARY KEY,
arl TEXT,
region TEXT,
created_at REAL,
updated_at REAL
);
"""
@staticmethod
def _table_columns(conn: sqlite3.Connection, table: str) -> set[str]:
try:
cur = conn.execute(f"PRAGMA table_info({table})")
return {row[1] for row in cur.fetchall()}
except Exception:
return set()
# --- Checks ---
def check_history(self, conn: sqlite3.Connection) -> bool:
cur = conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='download_history'"
)
if not cur.fetchone():
return False
required = {
"id",
"download_type",
"title",
"artists",
"timestamp",
"status",
"service",
"quality_format",
"quality_bitrate",
"total_tracks",
"successful_tracks",
"failed_tracks",
"skipped_tracks",
"children_table",
"task_id",
"external_ids",
"metadata",
"release_date",
"genres",
"images",
"owner",
"album_type",
"duration_total_ms",
"explicit",
}
return required.issubset(self._table_columns(conn, "download_history"))
def check_watch_playlists(self, conn: sqlite3.Connection) -> bool:
cur = conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='watched_playlists'"
)
if not cur.fetchone():
return False
required = {
"spotify_id",
"name",
"owner_id",
"owner_name",
"total_tracks",
"link",
"snapshot_id",
"last_checked",
"added_at",
"is_active",
}
return required.issubset(self._table_columns(conn, "watched_playlists"))
def check_watch_artists(self, conn: sqlite3.Connection) -> bool:
cur = conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='watched_artists'"
)
if not cur.fetchone():
return False
required = {
"spotify_id",
"name",
"link",
"total_albums_on_spotify",
"last_checked",
"added_at",
"is_active",
"genres",
"popularity",
"image_url",
}
return required.issubset(self._table_columns(conn, "watched_artists"))
def check_accounts(self, conn: sqlite3.Connection) -> bool:
cur = conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='spotify'"
)
if not cur.fetchone():
return False
if not {"name", "region", "created_at", "updated_at"}.issubset(
self._table_columns(conn, "spotify")
):
return False
cur = conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='deezer'"
)
if not cur.fetchone():
return False
return {"name", "arl", "region", "created_at", "updated_at"}.issubset(
self._table_columns(conn, "deezer")
)
# --- Updates ---
def update_history(self, conn: sqlite3.Connection) -> None:
conn.executescript(self.HISTORY_SQL)
def update_watch_playlists(self, conn: sqlite3.Connection) -> None:
conn.executescript(self.WATCH_PLAYLISTS_SQL)
def update_watch_artists(self, conn: sqlite3.Connection) -> None:
conn.executescript(self.WATCH_ARTISTS_SQL)
def update_accounts(self, conn: sqlite3.Connection) -> None:
conn.executescript(self.ACCOUNTS_SPOTIFY_SQL)
conn.executescript(self.ACCOUNTS_DEEZER_SQL)

View File

@@ -1,88 +0,0 @@
import sqlite3
import logging
logger = logging.getLogger(__name__)
class MigrationV3_1_0:
ARTIST_ALBUMS_EXPECTED_COLUMNS: dict[str, str] = {
"album_spotify_id": "TEXT PRIMARY KEY",
"artist_spotify_id": "TEXT",
"name": "TEXT",
"album_group": "TEXT",
"album_type": "TEXT",
"release_date": "TEXT",
"release_date_precision": "TEXT",
"total_tracks": "INTEGER",
"link": "TEXT",
"image_url": "TEXT",
"added_to_db": "INTEGER",
"last_seen_on_spotify": "INTEGER",
"download_task_id": "TEXT",
"download_status": "INTEGER DEFAULT 0",
"is_fully_downloaded_managed_by_app": "INTEGER DEFAULT 0",
}
def _table_columns(self, conn: sqlite3.Connection, table: str) -> set[str]:
try:
cur = conn.execute(f"PRAGMA table_info({table})")
return {row[1] for row in cur.fetchall()}
except sqlite3.OperationalError:
return set()
def check_watch_artists(self, conn: sqlite3.Connection) -> bool:
"""Checks if the artist-specific tables have the new columns."""
try:
cur = conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'artist_%' LIMIT 1"
)
first_artist_table = cur.fetchone()
if not first_artist_table:
return True # No artist tables, so no migration needed
table_name = first_artist_table[0]
existing_columns = self._table_columns(conn, table_name)
required_columns = self.ARTIST_ALBUMS_EXPECTED_COLUMNS.keys()
return set(required_columns).issubset(existing_columns)
except Exception as e:
logger.error(f"Error checking artist watch DB schema: {e}")
return False
def update_watch_artists(self, conn: sqlite3.Connection) -> None:
"""Updates all artist-specific tables with new columns."""
try:
cur = conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'artist_%'"
)
artist_tables = cur.fetchall()
for row in artist_tables:
table_name = row[0]
existing_columns = self._table_columns(conn, table_name)
for col_name, col_type in self.ARTIST_ALBUMS_EXPECTED_COLUMNS.items():
if col_name in existing_columns:
continue
try:
# Remove constraints for ADD COLUMN
col_type_for_add = (
col_type.replace("PRIMARY KEY", "")
.replace("AUTOINCREMENT", "")
.replace("NOT NULL", "")
.strip()
)
conn.execute(
f'ALTER TABLE "{table_name}" ADD COLUMN {col_name} {col_type_for_add}'
)
logger.info(
f"Added column '{col_name}' to table '{table_name}' in artists.db."
)
except sqlite3.OperationalError as e:
logger.warning(
f"Could not add column '{col_name}' to table '{table_name}': {e}"
)
except Exception as e:
logger.error(f"Failed to update artist watch DB: {e}", exc_info=True)

View File

@@ -1,42 +0,0 @@
import sqlite3
class MigrationV3_1_1:
"""
Dummy migration for version 3.1.1 to 3.1.2.
No database schema changes were made between these versions.
This class serves as a placeholder to ensure the migration runner
is aware of this version and can proceed without errors.
"""
def check_history(self, conn: sqlite3.Connection) -> bool:
# No changes, so migration is not needed.
return True
def update_history(self, conn: sqlite3.Connection) -> None:
# No-op
pass
def check_watch_artists(self, conn: sqlite3.Connection) -> bool:
# No changes, so migration is not needed.
return True
def update_watch_artists(self, conn: sqlite3.Connection) -> None:
# No-op
pass
def check_watch_playlists(self, conn: sqlite3.Connection) -> bool:
# No changes, so migration is not needed.
return True
def update_watch_playlists(self, conn: sqlite3.Connection) -> None:
# No-op
pass
def check_accounts(self, conn: sqlite3.Connection) -> bool:
# No changes, so migration is not needed.
return True
def update_accounts(self, conn: sqlite3.Connection) -> None:
# No-op
pass

View File

@@ -1,103 +0,0 @@
import sqlite3
import logging
logger = logging.getLogger(__name__)
class MigrationV3_1_2:
"""
Migration for version 3.1.2.
Ensure history children tables (album_*/playlist_*) include service and quality columns.
"""
CHILDREN_EXTRA_COLUMNS: dict[str, str] = {
"service": "TEXT",
"quality_format": "TEXT",
"quality_bitrate": "TEXT",
}
def _table_columns(self, conn: sqlite3.Connection, table: str) -> set[str]:
try:
cur = conn.execute(f"PRAGMA table_info({table})")
return {row[1] for row in cur.fetchall()}
except sqlite3.OperationalError:
return set()
def _list_children_tables(self, conn: sqlite3.Connection) -> list[str]:
tables: set[str] = set()
try:
cur = conn.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND (name LIKE 'album_%' OR name LIKE 'playlist_%') AND name != 'download_history'"
)
for row in cur.fetchall():
if row and row[0]:
tables.add(row[0])
except sqlite3.Error as e:
logger.warning(f"Failed to scan sqlite_master for children tables: {e}")
try:
cur = conn.execute(
"SELECT DISTINCT children_table FROM download_history WHERE children_table IS NOT NULL AND TRIM(children_table) != ''"
)
for row in cur.fetchall():
t = row[0]
if t:
tables.add(t)
except sqlite3.Error as e:
logger.warning(f"Failed to scan download_history for children tables: {e}")
return sorted(tables)
def check_history(self, conn: sqlite3.Connection) -> bool:
tables = self._list_children_tables(conn)
if not tables:
# Nothing to migrate
return True
# If any table is missing any of the extra columns, migration is needed
for t in tables:
cols = self._table_columns(conn, t)
if not set(self.CHILDREN_EXTRA_COLUMNS.keys()).issubset(cols):
return False
return True
def update_history(self, conn: sqlite3.Connection) -> None:
tables = self._list_children_tables(conn)
for t in tables:
existing = self._table_columns(conn, t)
for col_name, col_type in self.CHILDREN_EXTRA_COLUMNS.items():
if col_name in existing:
continue
try:
conn.execute(f"ALTER TABLE {t} ADD COLUMN {col_name} {col_type}")
logger.info(
f"Added column '{col_name} {col_type}' to history children table '{t}'."
)
except sqlite3.OperationalError as e:
logger.warning(
f"Could not add column '{col_name}' to history children table '{t}': {e}"
)
def check_watch_artists(self, conn: sqlite3.Connection) -> bool:
# No changes for watch artists in 3.1.2
return True
def update_watch_artists(self, conn: sqlite3.Connection) -> None:
# No-op
pass
def check_watch_playlists(self, conn: sqlite3.Connection) -> bool:
# No changes for watch playlists in 3.1.2
return True
def update_watch_playlists(self, conn: sqlite3.Connection) -> None:
# No-op
pass
def check_accounts(self, conn: sqlite3.Connection) -> bool:
# No changes for accounts in 3.1.2
return True
def update_accounts(self, conn: sqlite3.Connection) -> None:
# No-op
pass

routes/migrations/v3_2_0.py (new file)
View File

@@ -0,0 +1,100 @@
import sqlite3
import logging
logger = logging.getLogger(__name__)
class MigrationV3_2_0:
"""
Migration for version 3.2.0 (upgrade path 3.2.0 -> 3.2.1).
- Adds per-item batch progress columns to Watch DBs to support page-by-interval processing.
- Enforces prerequisite: previous instance version must be 3.2.0 (validated by the runner).
"""
# New columns to add to watched tables
PLAYLISTS_ADDED_COLUMNS: dict[str, str] = {
"batch_next_offset": "INTEGER DEFAULT 0",
"batch_processing_snapshot_id": "TEXT",
}
ARTISTS_ADDED_COLUMNS: dict[str, str] = {
"batch_next_offset": "INTEGER DEFAULT 0",
}
# --- No-op for history/accounts in 3.2.1 ---
def check_history(self, conn: sqlite3.Connection) -> bool:
return True
def update_history(self, conn: sqlite3.Connection) -> None:
pass
def check_accounts(self, conn: sqlite3.Connection) -> bool:
return True
def update_accounts(self, conn: sqlite3.Connection) -> None:
pass
# --- Watch: playlists ---
def check_watch_playlists(self, conn: sqlite3.Connection) -> bool:
try:
cur = conn.execute("PRAGMA table_info(watched_playlists)")
cols = {row[1] for row in cur.fetchall()}
return set(self.PLAYLISTS_ADDED_COLUMNS.keys()).issubset(cols)
except sqlite3.OperationalError:
# Table missing means not ready
return False
def update_watch_playlists(self, conn: sqlite3.Connection) -> None:
# Add new columns if missing
try:
cur = conn.execute("PRAGMA table_info(watched_playlists)")
existing = {row[1] for row in cur.fetchall()}
for col_name, col_type in self.PLAYLISTS_ADDED_COLUMNS.items():
if col_name in existing:
continue
try:
conn.execute(
f"ALTER TABLE watched_playlists ADD COLUMN {col_name} {col_type}"
)
logger.info(
f"Added column '{col_name} {col_type}' to watched_playlists for 3.2.1 batch progress."
)
except sqlite3.OperationalError as e:
logger.warning(
f"Could not add column '{col_name}' to watched_playlists: {e}"
)
except Exception:
logger.error("Failed to update watched_playlists for 3.2.1", exc_info=True)
# --- Watch: artists ---
def check_watch_artists(self, conn: sqlite3.Connection) -> bool:
try:
cur = conn.execute("PRAGMA table_info(watched_artists)")
cols = {row[1] for row in cur.fetchall()}
return set(self.ARTISTS_ADDED_COLUMNS.keys()).issubset(cols)
except sqlite3.OperationalError:
return False
def update_watch_artists(self, conn: sqlite3.Connection) -> None:
try:
cur = conn.execute("PRAGMA table_info(watched_artists)")
existing = {row[1] for row in cur.fetchall()}
for col_name, col_type in self.ARTISTS_ADDED_COLUMNS.items():
if col_name in existing:
continue
try:
conn.execute(
f"ALTER TABLE watched_artists ADD COLUMN {col_name} {col_type}"
)
logger.info(
f"Added column '{col_name} {col_type}' to watched_artists for 3.2.1 batch progress."
)
except sqlite3.OperationalError as e:
logger.warning(
f"Could not add column '{col_name}' to watched_artists: {e}"
)
except Exception:
logger.error("Failed to update watched_artists for 3.2.1", exc_info=True)

View File

@@ -1,4 +1,5 @@
import json
from routes.utils.watch.manager import get_watch_config
import logging
from routes.utils.celery_queue_manager import download_queue_manager
from routes.utils.get_info import get_spotify_info
@@ -86,16 +87,16 @@ def get_artist_discography(
raise
def download_artist_albums(
url, album_type="album,single,compilation", request_args=None, username=None
):
def download_artist_albums(url, album_type=None, request_args=None, username=None):
"""
Download albums by an artist, filtered by album types.
If album_type is not provided, uses the watchedArtistAlbumGroup setting from watch config.
Args:
url (str): Spotify artist URL
album_type (str): Comma-separated list of album types to download
(album, single, compilation, appears_on)
If None, uses watchedArtistAlbumGroup setting
request_args (dict): Original request arguments for tracking
username (str | None): Username initiating the request, used for per-user separation
@@ -118,39 +119,82 @@ def download_artist_albums(
logger.error(error_msg)
raise ValueError(error_msg)
artist_data = get_spotify_info(artist_id, "artist_discography")
# Get watch config to determine which album groups to download
watch_config = get_watch_config()
allowed_groups = [
g.lower()
for g in watch_config.get("watchedArtistAlbumGroup", ["album", "single"])
]
logger.info(
f"Filtering albums by watchedArtistAlbumGroup setting (exact album_group match): {allowed_groups}"
)
if not artist_data or "items" not in artist_data:
# Fetch all artist albums with pagination
all_artist_albums = []
offset = 0
limit = 50 # Spotify API limit for artist albums
logger.info(f"Fetching all albums for artist ID: {artist_id} with pagination")
while True:
logger.debug(
f"Fetching albums for {artist_id}. Limit: {limit}, Offset: {offset}"
)
artist_data_page = get_spotify_info(
artist_id, "artist_discography", limit=limit, offset=offset
)
if not artist_data_page or not isinstance(artist_data_page.get("items"), list):
logger.warning(
f"No album items found or invalid format for artist {artist_id} at offset {offset}. Response: {artist_data_page}"
)
break
current_page_albums = artist_data_page.get("items", [])
if not current_page_albums:
logger.info(
f"No more albums on page for artist {artist_id} at offset {offset}. Total fetched so far: {len(all_artist_albums)}."
)
break
logger.debug(
f"Fetched {len(current_page_albums)} albums on current page for artist {artist_id}."
)
all_artist_albums.extend(current_page_albums)
# Check if Spotify indicates a next page URL
if artist_data_page.get("next"):
offset += limit # Increment offset by the limit used for the request
else:
logger.info(
f"No next page URL for artist {artist_id}. Pagination complete. Total albums fetched: {len(all_artist_albums)}."
)
break
if not all_artist_albums:
raise ValueError(
f"Failed to retrieve artist data or no albums found for artist ID {artist_id}"
)
allowed_types = [t.strip().lower() for t in album_type.split(",")]
logger.info(f"Filtering albums by types: {allowed_types}")
# Filter albums based on the allowed types using album_group field (like in manager.py)
filtered_albums = []
for album in artist_data.get("items", []):
album_type_value = album.get("album_type", "").lower()
for album in all_artist_albums:
album_group_value = album.get("album_group", "").lower()
album_name = album.get("name", "Unknown Album")
album_id = album.get("id", "Unknown ID")
if (
(
"album" in allowed_types
and album_type_value == "album"
and album_group_value == "album"
)
or (
"single" in allowed_types
and album_type_value == "single"
and album_group_value == "single"
)
or ("compilation" in allowed_types and album_type_value == "compilation")
or ("appears_on" in allowed_types and album_group_value == "appears_on")
):
# Exact album_group match only (align with watch manager)
is_matching_group = album_group_value in allowed_groups
logger.debug(
f"Album {album_name} ({album_id}): album_group={album_group_value}. Allowed groups: {allowed_groups}. Match: {is_matching_group}."
)
if is_matching_group:
filtered_albums.append(album)
if not filtered_albums:
logger.warning(f"No albums match the specified types: {album_type}")
logger.warning(f"No albums match the specified groups: {allowed_groups}")
return [], []
successfully_queued_albums = []
@@ -168,7 +212,7 @@ def download_artist_albums(
if not album_url:
logger.warning(
f"Skipping album '{album_name}' because it has no Spotify URL."
f"Skipping album {album_name} because it has no Spotify URL."
)
continue
@@ -211,6 +255,6 @@ def download_artist_albums(
)
logger.info(
f"Artist album processing: {len(successfully_queued_albums)} queued, {len(duplicate_albums)} duplicates found."
f"Artist album processing: {len(successfully_queued_albums)} queued, {len(duplicate_albums)} duplicates found from {len(filtered_albums)} matching albums out of {len(all_artist_albums)} total albums."
)
return successfully_queued_albums, duplicate_albums
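A hypothetical invocation of the reworked function (the URL is a real artist link; the request_args and username shapes are assumptions, not documented contracts):

```python
queued, duplicates = download_artist_albums(
    "https://open.spotify.com/artist/4Z8W4fKeB5YxbusRsdQVPb",
    album_type=None,  # None -> fall back to the watchedArtistAlbumGroup setting
    request_args={"source": "manual"},
    username="alice",
)
print(f"queued={len(queued)} duplicates={len(duplicates)}")
```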

View File

@@ -25,6 +25,9 @@ EXPECTED_WATCHED_PLAYLISTS_COLUMNS = {
"last_checked": "INTEGER",
"added_at": "INTEGER",
"is_active": "INTEGER DEFAULT 1",
# New: batch progress for per-interval page fetching
"batch_next_offset": "INTEGER DEFAULT 0",
"batch_processing_snapshot_id": "TEXT",
}
EXPECTED_PLAYLIST_TRACKS_COLUMNS = {
@@ -55,6 +58,8 @@ EXPECTED_WATCHED_ARTISTS_COLUMNS = {
"genres": "TEXT", # Comma-separated
"popularity": "INTEGER",
"image_url": "TEXT",
# New: batch progress for per-interval page fetching
"batch_next_offset": "INTEGER DEFAULT 0",
}
EXPECTED_ARTIST_ALBUMS_COLUMNS = {
@@ -439,6 +444,61 @@ def update_playlist_snapshot(
)
# --- New: per-playlist batch progress helpers ---
def get_playlist_batch_progress(playlist_spotify_id: str) -> tuple[int, str | None]:
"""Returns (batch_next_offset, batch_processing_snapshot_id) for a watched playlist."""
try:
with _get_playlists_db_connection() as conn:
cursor = conn.cursor()
cursor.execute(
"SELECT batch_next_offset, batch_processing_snapshot_id FROM watched_playlists WHERE spotify_id = ?",
(playlist_spotify_id,),
)
row = cursor.fetchone()
if not row:
return 0, None
next_offset = (
row["batch_next_offset"] if "batch_next_offset" in row.keys() else 0
)
processing_snapshot = (
row["batch_processing_snapshot_id"]
if "batch_processing_snapshot_id" in row.keys()
else None
)
return int(next_offset or 0), processing_snapshot
except sqlite3.Error as e:
logger.error(
f"Error retrieving batch progress for playlist {playlist_spotify_id}: {e}",
exc_info=True,
)
return 0, None
def set_playlist_batch_progress(
playlist_spotify_id: str, next_offset: int, processing_snapshot_id: str | None
) -> None:
"""Updates batch_next_offset and batch_processing_snapshot_id for a watched playlist."""
try:
with _get_playlists_db_connection() as conn:
cursor = conn.cursor()
cursor.execute(
"""
UPDATE watched_playlists
SET batch_next_offset = ?, batch_processing_snapshot_id = ?
WHERE spotify_id = ?
""",
(int(next_offset or 0), processing_snapshot_id, playlist_spotify_id),
)
conn.commit()
except sqlite3.Error as e:
logger.error(
f"Error updating batch progress for playlist {playlist_spotify_id}: {e}",
exc_info=True,
)
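These two helpers are enough to make a watch cycle resumable across intervals. A sketch of a consumer (fetch_playlist_page, handle_tracks, and PAGE_SIZE are illustrative stand-ins, not functions from this repo):

```python
PAGE_SIZE = 50  # assumption: mirrors the Spotify page limit used elsewhere

def process_one_batch(playlist_id: str, current_snapshot_id: str) -> None:
    offset, in_progress_snapshot = get_playlist_batch_progress(playlist_id)
    if in_progress_snapshot != current_snapshot_id:
        offset = 0  # playlist changed since the last run; restart from the top
    page = fetch_playlist_page(playlist_id, offset=offset, limit=PAGE_SIZE)
    handle_tracks(page)
    if len(page) < PAGE_SIZE:
        set_playlist_batch_progress(playlist_id, 0, None)  # finished; reset
    else:
        set_playlist_batch_progress(playlist_id, offset + PAGE_SIZE, current_snapshot_id)
```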
def get_playlist_track_ids_from_db(playlist_spotify_id: str):
"""Retrieves all track Spotify IDs from a specific playlist's tracks table in playlists.db."""
table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
@@ -773,7 +833,7 @@ def add_specific_tracks_to_playlist_table(
def remove_specific_tracks_from_playlist_table(
playlist_spotify_id: str, track_spotify_ids: list
):
"""Removes specific tracks from the playlist's local track table."""
"""Removes specific tracks from the playlist's local DB table."""
table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
if not track_spotify_ids:
return 0
@@ -799,7 +859,7 @@ def remove_specific_tracks_from_playlist_table(
conn.commit()
deleted_count = cursor.rowcount
logger.info(
f"Manually removed {deleted_count} tracks from DB for playlist {playlist_spotify_id}."
f"Successfully removed {deleted_count} tracks locally for playlist {playlist_spotify_id}."
)
return deleted_count
except sqlite3.Error as e:
@@ -1164,6 +1224,53 @@ def update_artist_metadata_after_check(
)
# --- New: per-artist batch progress helpers ---
def get_artist_batch_next_offset(artist_spotify_id: str) -> int:
try:
with _get_artists_db_connection() as conn:
cursor = conn.cursor()
cursor.execute(
"SELECT batch_next_offset FROM watched_artists WHERE spotify_id = ?",
(artist_spotify_id,),
)
row = cursor.fetchone()
if not row:
return 0
return (
int(row["batch_next_offset"])
if "batch_next_offset" in row.keys()
else 0
)
except sqlite3.Error as e:
logger.error(
f"Error retrieving batch_next_offset for artist {artist_spotify_id}: {e}",
exc_info=True,
)
return 0
def set_artist_batch_next_offset(artist_spotify_id: str, next_offset: int) -> None:
try:
with _get_artists_db_connection() as conn:
cursor = conn.cursor()
cursor.execute(
"""
UPDATE watched_artists
SET batch_next_offset = ?
WHERE spotify_id = ?
""",
(int(next_offset or 0), artist_spotify_id),
)
conn.commit()
except sqlite3.Error as e:
logger.error(
f"Error updating batch_next_offset for artist {artist_spotify_id}: {e}",
exc_info=True,
)
def get_artist_album_ids_from_db(artist_spotify_id: str):
"""Retrieves all album Spotify IDs from a specific artist's albums table in artists.db."""
table_name = f"artist_{artist_spotify_id.replace('-', '_')}"
@@ -1289,11 +1396,11 @@ def add_or_update_album_for_artist(
total_tracks,
link,
image_url,
current_time, # added_to_db
current_time, # last_seen_on_spotify
task_id, # download_task_id
download_status, # download_status
0, # is_fully_downloaded_managed_by_app
current_time, # added_to_db
current_time, # last_seen_on_spotify
task_id, # download_task_id
download_status, # download_status
0, # is_fully_downloaded_managed_by_app
)
cursor.execute(
f"""

File diff suppressed because it is too large.

View File

@@ -1,7 +1,7 @@
{
"name": "spotizerr-ui",
"private": true,
"version": "3.2.0",
"version": "3.2.1",
"type": "module",
"scripts": {
"dev": "vite",

View File

@@ -85,6 +85,7 @@ export function AccountsTab() {
onSuccess: () => {
toast.success("Account added successfully!");
queryClient.invalidateQueries({ queryKey: ["credentials", activeService] });
queryClient.invalidateQueries({ queryKey: ["config"] }); // Invalidate config to update active Spotify account in UI
setIsAdding(false);
setSubmitError(null);
reset();

View File

@@ -13,6 +13,7 @@ interface WatchSettings {
enabled: boolean;
watchPollIntervalSeconds: number;
watchedArtistAlbumGroup: AlbumGroup[];
maxItemsPerRun: number;
}
interface DownloadSettings {
@@ -92,8 +93,9 @@ export function WatchTab() {
setTimeout(() => setSaveStatus("idle"), 3000);
queryClient.invalidateQueries({ queryKey: ["watchConfig"] });
},
onError: (error) => {
toast.error(`Failed to save settings: ${error.message}`);
onError: (error: any) => {
const message = error?.response?.data?.error || error?.message || "Unknown error";
toast.error(`Failed to save settings: ${message}`);
setSaveStatus("error");
setTimeout(() => setSaveStatus("idle"), 3000);
},
@@ -108,6 +110,7 @@ export function WatchTab() {
}, [config, reset]);
const watchEnabled = watch("enabled");
const maxItemsPerRunValue = watch("maxItemsPerRun");
// Validation effect for watch + download method requirement
useEffect(() => {
@@ -126,8 +129,14 @@ export function WatchTab() {
error = `Watch with Fallback requires accounts for both services. Missing: ${missingServices.join(", ")}. Configure accounts in the Accounts tab.`;
}
// Validate maxItemsPerRun range (1..50)
const mir = Number(maxItemsPerRunValue);
if (!error && (Number.isNaN(mir) || mir < 1 || mir > 50)) {
error = "Max items per run must be between 1 and 50.";
}
setValidationError(error);
}, [watchEnabled, downloadConfig?.realTime, downloadConfig?.fallback, spotifyCredentials?.length, deezerCredentials?.length]);
}, [watchEnabled, downloadConfig?.realTime, downloadConfig?.fallback, spotifyCredentials?.length, deezerCredentials?.length, maxItemsPerRunValue]);
const onSubmit: SubmitHandler<WatchSettings> = (data) => {
// Check validation before submitting
@@ -148,9 +157,18 @@ export function WatchTab() {
return;
}
// Validate maxItemsPerRun in handler too, to be safe
const mir = Number(data.maxItemsPerRun);
if (Number.isNaN(mir) || mir < 1 || mir > 50) {
setValidationError("Max items per run must be between 1 and 50.");
toast.error("Validation failed: Max items per run must be between 1 and 50.");
return;
}
mutation.mutate({
...data,
watchPollIntervalSeconds: Number(data.watchPollIntervalSeconds),
maxItemsPerRun: Number(data.maxItemsPerRun),
});
};
@@ -225,7 +243,20 @@ export function WatchTab() {
{...register("watchPollIntervalSeconds")}
className="block w-full p-2 border bg-input-background dark:bg-input-background-dark border-input-border dark:border-input-border-dark rounded-md focus:outline-none focus:ring-2 focus:ring-input-focus"
/>
<p className="text-sm text-content-muted dark:text-content-muted-dark mt-1">How often to check watched items for updates.</p>
<p className="text-sm text-content-muted dark:text-content-muted-dark mt-1">How often to check for new items in watchlist.</p>
</div>
<div className="flex flex-col gap-2">
<label htmlFor="maxItemsPerRun" className="text-content-primary dark:text-content-primary-dark">Max Items Per Run</label>
<input
id="maxItemsPerRun"
type="number"
min="1"
max="50"
{...register("maxItemsPerRun")}
className="block w-full p-2 border bg-input-background dark:bg-input-background-dark border-input-border dark:border-input-border-dark rounded-md focus:outline-none focus:ring-2 focus:ring-input-focus"
/>
<p className="text-sm text-content-muted dark:text-content-muted-dark mt-1">Batch size per watch cycle (150).</p>
</div>
</div>

View File

@@ -7,165 +7,176 @@ import { useAuth } from "./auth-context";
// --- Case Conversion Utility ---
// This is added here to simplify the fix and avoid module resolution issues.
function snakeToCamel(str: string): string {
return str.replace(/(_\w)/g, (m) => m[1].toUpperCase());
return str.replace(/(_\w)/g, (m) => m[1].toUpperCase());
}
function convertKeysToCamelCase(obj: unknown): unknown {
if (Array.isArray(obj)) {
return obj.map((v) => convertKeysToCamelCase(v));
}
if (typeof obj === "object" && obj !== null) {
return Object.keys(obj).reduce((acc: Record<string, unknown>, key: string) => {
const camelKey = snakeToCamel(key);
acc[camelKey] = convertKeysToCamelCase((obj as Record<string, unknown>)[key]);
return acc;
}, {});
}
return obj;
if (Array.isArray(obj)) {
return obj.map((v) => convertKeysToCamelCase(v));
}
if (typeof obj === "object" && obj !== null) {
return Object.keys(obj).reduce((acc: Record<string, unknown>, key: string) => {
const camelKey = snakeToCamel(key);
acc[camelKey] = convertKeysToCamelCase((obj as Record<string, unknown>)[key]);
return acc;
}, {});
}
return obj;
}
// Redefine AppSettings to match the flat structure of the API response
export type FlatAppSettings = {
service: "spotify" | "deezer";
spotify: string;
spotifyQuality: "NORMAL" | "HIGH" | "VERY_HIGH";
deezer: string;
deezerQuality: "MP3_128" | "MP3_320" | "FLAC";
maxConcurrentDownloads: number;
realTime: boolean;
fallback: boolean;
convertTo: "MP3" | "AAC" | "OGG" | "OPUS" | "FLAC" | "WAV" | "ALAC" | "";
bitrate: string;
maxRetries: number;
retryDelaySeconds: number;
retryDelayIncrease: number;
customDirFormat: string;
customTrackFormat: string;
tracknumPadding: boolean;
saveCover: boolean;
explicitFilter: boolean;
// Add other fields from the old AppSettings as needed by other parts of the app
watch: AppSettings["watch"];
// Add defaults for the new download properties
threads: number;
path: string;
skipExisting: boolean;
m3u: boolean;
hlsThreads: number;
// Frontend-only flag used in DownloadsTab
recursiveQuality: boolean;
separateTracksByUser: boolean;
// Add defaults for the new formatting properties
track: string;
album: string;
playlist: string;
compilation: string;
artistSeparator: string;
spotifyMetadata: boolean;
realTimeMultiplier: number;
service: "spotify" | "deezer";
spotify: string;
spotifyQuality: "NORMAL" | "HIGH" | "VERY_HIGH";
deezer: string;
deezerQuality: "MP3_128" | "MP3_320" | "FLAC";
maxConcurrentDownloads: number;
realTime: boolean;
fallback: boolean;
convertTo: "MP3" | "AAC" | "OGG" | "OPUS" | "FLAC" | "WAV" | "ALAC" | "";
bitrate: string;
maxRetries: number;
retryDelaySeconds: number;
retryDelayIncrease: number;
customDirFormat: string;
customTrackFormat: string;
tracknumPadding: boolean;
saveCover: boolean;
explicitFilter: boolean;
// Add other fields from the old AppSettings as needed by other parts of the app
watch: AppSettings["watch"];
// Add defaults for the new download properties
threads: number;
path: string;
skipExisting: boolean;
m3u: boolean;
hlsThreads: number;
// Frontend-only flag used in DownloadsTab
recursiveQuality: boolean;
separateTracksByUser: boolean;
// Add defaults for the new formatting properties
track: string;
album: string;
playlist: string;
compilation: string;
artistSeparator: string;
spotifyMetadata: boolean;
realTimeMultiplier: number;
};
const defaultSettings: FlatAppSettings = {
service: "spotify",
spotify: "",
spotifyQuality: "NORMAL",
deezer: "",
deezerQuality: "MP3_128",
maxConcurrentDownloads: 3,
realTime: false,
fallback: false,
convertTo: "",
bitrate: "",
maxRetries: 3,
retryDelaySeconds: 5,
retryDelayIncrease: 5,
customDirFormat: "%ar_album%/%album%",
customTrackFormat: "%tracknum%. %music%",
tracknumPadding: true,
saveCover: true,
explicitFilter: false,
// Add defaults for the new download properties
threads: 4,
path: "/downloads",
skipExisting: true,
m3u: false,
hlsThreads: 8,
// Frontend-only default
recursiveQuality: false,
separateTracksByUser: false,
// Add defaults for the new formatting properties
track: "{artist_name}/{album_name}/{track_number} - {track_name}",
album: "{artist_name}/{album_name}",
playlist: "Playlists/{playlist_name}",
compilation: "Compilations/{album_name}",
artistSeparator: "; ",
spotifyMetadata: true,
watch: {
enabled: false,
},
realTimeMultiplier: 0,
service: "spotify",
spotify: "",
spotifyQuality: "NORMAL",
deezer: "",
deezerQuality: "MP3_128",
maxConcurrentDownloads: 3,
realTime: false,
fallback: false,
convertTo: "",
bitrate: "",
maxRetries: 3,
retryDelaySeconds: 5,
retryDelayIncrease: 5,
customDirFormat: "%ar_album%/%album%",
customTrackFormat: "%tracknum%. %music%",
tracknumPadding: true,
saveCover: true,
explicitFilter: false,
// Add defaults for the new download properties
threads: 4,
path: "/downloads",
skipExisting: true,
m3u: false,
hlsThreads: 8,
// Frontend-only default
recursiveQuality: false,
separateTracksByUser: false,
// Add defaults for the new formatting properties
track: "{artist_name}/{album_name}/{track_number} - {track_name}",
album: "{artist_name}/{album_name}",
playlist: "Playlists/{playlist_name}",
compilation: "Compilations/{album_name}",
artistSeparator: "; ",
spotifyMetadata: true,
watch: {
enabled: false,
maxItemsPerRun: 50,
watchPollIntervalSeconds: 3600,
watchedArtistAlbumGroup: ["album", "single"],
},
realTimeMultiplier: 0,
};
interface FetchedCamelCaseSettings {
watchEnabled?: boolean;
watch?: { enabled: boolean };
[key: string]: unknown;
watchEnabled?: boolean;
watch?: { enabled: boolean; maxItemsPerRun?: number; watchPollIntervalSeconds?: number; watchedArtistAlbumGroup?: string[] };
[key: string]: unknown;
}
const fetchSettings = async (): Promise<FlatAppSettings> => {
try {
const [{ data: generalConfig }, { data: watchConfig }] = await Promise.all([
authApiClient.client.get("/config"),
authApiClient.client.get("/config/watch"),
]);
try {
const [{ data: generalConfig }, { data: watchConfig }] = await Promise.all([
authApiClient.client.get("/config"),
authApiClient.client.get("/config/watch"),
]);
const combinedConfig = {
...generalConfig,
watch: watchConfig,
};
const combinedConfig = {
...generalConfig,
watch: watchConfig,
};
// Transform the keys before returning the data
const camelData = convertKeysToCamelCase(combinedConfig) as FetchedCamelCaseSettings;
// Transform the keys before returning the data
const camelData = convertKeysToCamelCase(combinedConfig) as FetchedCamelCaseSettings;
const withDefaults: FlatAppSettings = {
...(camelData as unknown as FlatAppSettings),
// Ensure required frontend-only fields exist
recursiveQuality: Boolean((camelData as any).recursiveQuality ?? false),
realTimeMultiplier: Number((camelData as any).realTimeMultiplier ?? 0),
};
const withDefaults: FlatAppSettings = {
...(camelData as unknown as FlatAppSettings),
// Ensure required frontend-only fields exist
recursiveQuality: Boolean((camelData as any).recursiveQuality ?? false),
realTimeMultiplier: Number((camelData as any).realTimeMultiplier ?? 0),
// Ensure watch subkeys default if missing
watch: {
...(camelData.watch as any),
enabled: Boolean((camelData.watch as any)?.enabled ?? false),
maxItemsPerRun: Number((camelData.watch as any)?.maxItemsPerRun ?? 50),
watchPollIntervalSeconds: Number((camelData.watch as any)?.watchPollIntervalSeconds ?? 3600),
watchedArtistAlbumGroup: (camelData.watch as any)?.watchedArtistAlbumGroup ?? ["album", "single"],
},
};
return withDefaults;
} catch (error: any) {
// If we get authentication errors, return default settings
if (error.response?.status === 401 || error.response?.status === 403) {
console.log("Authentication required for config access, using default settings");
return defaultSettings;
}
// Re-throw other errors for React Query to handle
throw error;
}
return withDefaults;
} catch (error: any) {
// If we get authentication errors, return default settings
if (error.response?.status === 401 || error.response?.status === 403) {
console.log("Authentication required for config access, using default settings");
return defaultSettings;
}
// Re-throw other errors for React Query to handle
throw error;
}
};
export function SettingsProvider({ children }: { children: ReactNode }) {
const { isLoading, authEnabled, isAuthenticated, user } = useAuth();
const { isLoading, authEnabled, isAuthenticated, user } = useAuth();
// Only fetch settings when auth is ready and user is admin (or auth is disabled)
const shouldFetchSettings = !isLoading && (!authEnabled || (isAuthenticated && user?.role === "admin"));
// Only fetch settings when auth is ready and user is admin (or auth is disabled)
const shouldFetchSettings = !isLoading && (!authEnabled || (isAuthenticated && user?.role === "admin"));
const {
data: settings,
isLoading: isSettingsLoading,
isError,
} = useQuery({
queryKey: ["config"],
queryFn: fetchSettings,
staleTime: 1000 * 60 * 5, // 5 minutes
refetchOnWindowFocus: false,
enabled: shouldFetchSettings, // Only run query when auth is ready and user is admin
});
const {
data: settings,
isLoading: isSettingsLoading,
isError,
} = useQuery({
queryKey: ["config"],
queryFn: fetchSettings,
staleTime: 1000 * 60 * 5, // 5 minutes
refetchOnWindowFocus: false,
enabled: shouldFetchSettings, // Only run query when auth is ready and user is admin
});
// Use default settings on error to prevent app crash
const value = { settings: isError ? defaultSettings : settings || null, isLoading: isSettingsLoading };
// Use default settings on error to prevent app crash
const value = { settings: isError ? defaultSettings : settings || null, isLoading: isSettingsLoading };
return <SettingsContext.Provider value={value}>{children}</SettingsContext.Provider>;
return <SettingsContext.Provider value={value}>{children}</SettingsContext.Provider>;
}

View File

@@ -37,7 +37,9 @@ export interface AppSettings {
spotifyMetadata: boolean;
watch: {
enabled: boolean;
// Add other watch properties from the old type if they still exist in the API response
maxItemsPerRun: number;
watchPollIntervalSeconds: number;
watchedArtistAlbumGroup: string[];
};
// Add other root-level properties from the API if they exist
realTimeMultiplier: number;

View File

@@ -246,9 +246,10 @@ export const Artist = () => {
return <div>Artist data could not be fully loaded. Please try again later.</div>;
}
const artistAlbums = applyFilters(albums.filter((album) => album.album_type === "album"));
const artistSingles = applyFilters(albums.filter((album) => album.album_type === "single"));
const artistCompilations = applyFilters(albums.filter((album) => album.album_type === "compilation"));
const artistAlbums = applyFilters(albums.filter((album) => (album.album_group ?? album.album_type) === "album"));
const artistSingles = applyFilters(albums.filter((album) => (album.album_group ?? album.album_type) === "single"));
const artistCompilations = applyFilters(albums.filter((album) => (album.album_group ?? album.album_type) === "compilation"));
const artistAppearsOn = applyFilters(albums.filter((album) => (album.album_group ?? "") === "appears_on"));
return (
<div className="artist-page">
@@ -364,6 +365,18 @@ export const Artist = () => {
</div>
)}
{/* Appears On */}
{artistAppearsOn.length > 0 && (
<div className="mb-12">
<h2 className="text-3xl font-bold mb-6 text-content-primary dark:text-content-primary-dark">Appears On</h2>
<div className="grid grid-cols-2 md:grid-cols-3 lg:grid-cols-4 xl:grid-cols-5 gap-6">
{artistAppearsOn.map((album) => (
<AlbumCard key={album.id} album={album} onDownload={() => handleDownloadAlbum(album)} />
))}
</div>
</div>
)}
{/* sentinel + loading */}
<div className="flex flex-col items-center gap-2">
{loadingMore && <div className="py-4">Loading more...</div>}

View File

@@ -44,6 +44,7 @@ export interface AlbumType {
id: string;
name: string;
album_type: "album" | "single" | "compilation";
album_group?: "album" | "single" | "compilation" | "appears_on";
artists: ArtistType[];
images: ImageType[];
release_date: string;