@@ -1,5 +1,12 @@
# Contributing guidelines

## Commit format

- All pull requests must be made to the `dev` branch
- Use [conventional commit messages](https://www.conventionalcommits.org/en/v1.0.0/), e.g. `feat: add feature` or `fix: resolve issue #69420`

## Feature philosophy

- When implementing a feature related to downloading, follow the rule of choice: every download must come from an active decision made by the user (e.g. clicking a download button, or deciding they want a whole artist's discography). This rules out features like recommendation algorithms, auto-generated playlists, etc.
@@ -19,16 +19,15 @@ WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    gosu \
    git \
    ffmpeg \
    ffmpeg gosu \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/
RUN uv pip install --system -r requirements.txt

# Copy application code (excluding UI source and TS source)
COPY . .
app.py (+20 −20)
@@ -25,6 +25,7 @@ except Exception as e:
    logging.getLogger(__name__).error(
        f"Database migration step failed early in startup: {e}", exc_info=True
    )
    sys.exit(1)

# Import route routers (to be created)
from routes.auth.credentials import router as credentials_router
@@ -47,6 +48,9 @@ from routes.utils.celery_config import REDIS_URL
from routes.auth import AUTH_ENABLED
from routes.auth.middleware import AuthMiddleware

# Import watch manager controls (start/stop) without triggering side effects
from routes.utils.watch.manager import start_watch_manager, stop_watch_manager

# Import and initialize routes (this will start the watch manager)
@@ -166,9 +170,25 @@ async def lifespan(app: FastAPI):
    except Exception as e:
        logging.error(f"Failed to start Celery workers: {e}")

    # Start Watch Manager after Celery is up
    try:
        start_watch_manager()
        logging.info("Watch Manager initialized and registered for shutdown.")
    except Exception as e:
        logging.error(
            f"Could not start Watch Manager: {e}. Watch functionality will be disabled.",
            exc_info=True,
        )

    yield

    # Shutdown
    try:
        stop_watch_manager()
        logging.info("Watch Manager stopped")
    except Exception as e:
        logging.error(f"Error stopping Watch Manager: {e}")

    try:
        celery_manager.stop()
        logging.info("Celery workers stopped")
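This hunk moves watch-manager startup out of import time (see the `routes/__init__.py` hunk below) and into FastAPI's lifespan hook, so the watch manager starts after Celery and stops before it. A minimal, self-contained sketch of the pattern; `start_service`/`stop_service` are hypothetical stand-ins for the project's `start_watch_manager`/`stop_watch_manager`:

```python
from contextlib import asynccontextmanager
from fastapi import FastAPI

def start_service() -> None:  # hypothetical stand-in for start_watch_manager()
    print("service started")

def stop_service() -> None:  # hypothetical stand-in for stop_watch_manager()
    print("service stopped")

@asynccontextmanager
async def lifespan(app: FastAPI):
    start_service()  # startup: runs once, before the app begins serving
    yield            # the application handles requests while suspended here
    stop_service()   # shutdown: runs once, when the app is stopping

app = FastAPI(lifespan=lifespan)
```

Compared with `atexit.register`, the lifespan hook gives a deterministic teardown order relative to other startup work (Celery here), which is why the PR retires the import-time version.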
@@ -1,36 +1,7 @@
import logging
import atexit

# Configure basic logging for the application if not already configured
# This is a good place for it if routes are a central part of your app structure.
logging.basicConfig(
    level=logging.INFO, format="%(message)s"
)
# This remains safe to execute on import
logging.basicConfig(level=logging.INFO, format="%(message)s")

logger = logging.getLogger(__name__)

# Run DB migrations early so other modules see expected schemas
try:
    from routes.migrations import run_migrations_if_needed

    run_migrations_if_needed()
    logger.info("Database migrations executed (if needed).")
except Exception as e:
    logger.error(f"Database migration step failed: {e}", exc_info=True)

try:
    from routes.utils.watch.manager import start_watch_manager, stop_watch_manager

    # Start the playlist watch manager when the application/blueprint is initialized
    start_watch_manager()
    # Register the stop function to be called on application exit
    atexit.register(stop_watch_manager)
    logger.info("Playlist Watch Manager initialized and registered for shutdown.")
except ImportError as e:
    logger.error(
        f"Could not import or start Playlist Watch Manager: {e}. Playlist watching will be disabled."
    )
except Exception as e:
    logger.error(
        f"An unexpected error occurred during Playlist Watch Manager setup: {e}",
        exc_info=True,
    )
@@ -129,6 +129,20 @@ async def handle_create_credential(service: str, name: str, request: Request, cu
    # For Deezer, it expects 'arl' and 'region'
    # Validation is handled within the create_credential utility function
    result = create_credential(service, name, data)

    # Set as active Spotify account if none is set
    if service == "spotify":
        try:
            from routes.utils.celery_config import get_config_params as get_main_config_params
            from routes.system.config import save_config

            config = get_main_config_params()
            # The field is likely "spotify" (as used in the frontend)
            if not config.get("spotify"):
                config["spotify"] = name
                save_config(config)
        except Exception as e:
            logger.warning(f"Could not set new Spotify account '{name}' as active: {e}")

    return {
        "message": f"Credential for '{name}' ({service}) created successfully.",
        "details": result,
@@ -3,10 +3,7 @@ import sqlite3
from pathlib import Path
from typing import Optional

from .v3_0_6 import MigrationV3_0_6
from .v3_1_0 import MigrationV3_1_0
from .v3_1_1 import MigrationV3_1_1
from .v3_1_2 import MigrationV3_1_2
from .v3_2_0 import MigrationV3_2_0

logger = logging.getLogger(__name__)
@@ -41,7 +38,7 @@ CHILDREN_EXPECTED_COLUMNS: dict[str, str] = {
    "metadata": "TEXT",
}

# 3.1.2 expected schemas for Watch DBs (kept here to avoid importing modules with side-effects)
# 3.2.0 expected schemas for Watch DBs (kept here to avoid importing modules with side-effects)
EXPECTED_WATCHED_PLAYLISTS_COLUMNS: dict[str, str] = {
    "spotify_id": "TEXT PRIMARY KEY",
    "name": "TEXT",
@@ -103,10 +100,7 @@ EXPECTED_ARTIST_ALBUMS_COLUMNS: dict[str, str] = {
    "is_fully_downloaded_managed_by_app": "INTEGER DEFAULT 0",
}

m306 = MigrationV3_0_6()
m310 = MigrationV3_1_0()
m311 = MigrationV3_1_1()
m312 = MigrationV3_1_2()
m320 = MigrationV3_2_0()


def _safe_connect(path: Path) -> Optional[sqlite3.Connection]:
@@ -184,60 +178,53 @@ def _create_or_update_children_table(conn: sqlite3.Connection, table_name: str)
    )


def _update_children_tables_for_history(conn: sqlite3.Connection) -> None:
    try:
        try:
            cur = conn.execute(
                "SELECT DISTINCT children_table FROM download_history WHERE children_table IS NOT NULL AND TRIM(children_table) != ''"
            )
            for row in cur.fetchall():
                table_name = row[0]
                if not table_name:
                    continue
                _create_or_update_children_table(conn, table_name)
        except sqlite3.Error as e:
            logger.warning(
                f"Failed to scan referenced children tables from main history: {e}"
            )
        try:
            cur = conn.execute(
                "SELECT name FROM sqlite_master WHERE type='table' AND (name LIKE 'album_%' OR name LIKE 'playlist_%') AND name != 'download_history'"
            )
            for row in cur.fetchall():
                table_name = row[0]
                _create_or_update_children_table(conn, table_name)
        except sqlite3.Error as e:
            logger.warning(f"Failed to scan legacy children tables in history DB: {e}")
        logger.info("Children history tables migration ensured")
    except Exception:
        logger.error("Failed migrating children history tables", exc_info=True)


def _ensure_creds_filesystem() -> None:
    try:
        BLOBS_DIR.mkdir(parents=True, exist_ok=True)
        if not SEARCH_JSON.exists():
            SEARCH_JSON.write_text(
                '{ "client_id": "", "client_secret": "" }\n', encoding="utf-8"
            )
            logger.info(f"Created default global Spotify creds file at {SEARCH_JSON}")
    except Exception:
        logger.error(
            "Failed to ensure credentials filesystem (blobs/search.json)", exc_info=True
        )


# --- Helper to validate instance is at least 3.1.2 on history DB ---


def _history_children_tables(conn: sqlite3.Connection) -> list[str]:
    tables: set[str] = set()
    try:
        cur = conn.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND (name LIKE 'album_%' OR name LIKE 'playlist_%') AND name != 'download_history'"
        )
        for row in cur.fetchall():
            if row and row[0]:
                tables.add(row[0])
    except sqlite3.Error as e:
        logger.warning(f"Failed to scan sqlite_master for children tables: {e}")

    try:
        cur = conn.execute(
            "SELECT DISTINCT children_table FROM download_history WHERE children_table IS NOT NULL AND TRIM(children_table) != ''"
        )
        for row in cur.fetchall():
            t = row[0]
            if t:
                tables.add(t)
    except sqlite3.Error as e:
        logger.warning(f"Failed to scan download_history for children tables: {e}")

    return sorted(tables)
def _apply_versioned_updates(
    conn: sqlite3.Connection, c_base, u_base, post_update=None
) -> None:
    if not c_base(conn):
        u_base(conn)
        if post_update:
            post_update(conn)


def _is_history_at_least_3_2_0(conn: sqlite3.Connection) -> bool:
    required_cols = {"service", "quality_format", "quality_bitrate"}
    tables = _history_children_tables(conn)
    if not tables:
        # Nothing to migrate implies OK
        return True
    for t in tables:
        try:
            cur = conn.execute(f"PRAGMA table_info({t})")
            cols = {row[1] for row in cur.fetchall()}
            if not required_cols.issubset(cols):
                return False
        except sqlite3.OperationalError:
            return False
    return True


# --- 3.1.2 upgrade helpers for Watch DBs ---
# --- 3.2.0 verification helpers for Watch DBs ---


def _update_watch_playlists_db(conn: sqlite3.Connection) -> None:
@@ -298,10 +285,10 @@ def _update_watch_playlists_db(conn: sqlite3.Connection) -> None:
                EXPECTED_PLAYLIST_TRACKS_COLUMNS,
                f"playlist tracks ({table_name})",
            )
        logger.info("Upgraded watch playlists DB to 3.1.2 schema")
        logger.info("Upgraded watch playlists DB to 3.2.0 base schema")
    except Exception:
        logger.error(
            "Failed to upgrade watch playlists DB to 3.1.2 schema", exc_info=True
            "Failed to upgrade watch playlists DB to 3.2.0 base schema", exc_info=True
        )
@@ -361,10 +348,24 @@ def _update_watch_artists_db(conn: sqlite3.Connection) -> None:
                EXPECTED_ARTIST_ALBUMS_COLUMNS,
                f"artist albums ({table_name})",
            )
        logger.info("Upgraded watch artists DB to 3.1.2 schema")
        logger.info("Upgraded watch artists DB to 3.2.0 base schema")
    except Exception:
        logger.error(
            "Failed to upgrade watch artists DB to 3.1.2 schema", exc_info=True
            "Failed to upgrade watch artists DB to 3.2.0 base schema", exc_info=True
        )


def _ensure_creds_filesystem() -> None:
    try:
        BLOBS_DIR.mkdir(parents=True, exist_ok=True)
        if not SEARCH_JSON.exists():
            SEARCH_JSON.write_text(
                '{ "client_id": "", "client_secret": "" }\n', encoding="utf-8"
            )
            logger.info(f"Created default global Spotify creds file at {SEARCH_JSON}")
    except Exception:
        logger.error(
            "Failed to ensure credentials filesystem (blobs/search.json)", exc_info=True
        )
@@ -374,75 +375,42 @@ def run_migrations_if_needed():
        return

    try:
        # History DB
        with _safe_connect(HISTORY_DB) as conn:
            if conn:
                _apply_versioned_updates(
                    conn,
                    m306.check_history,
                    m306.update_history,
                    post_update=_update_children_tables_for_history,
        # Require instance to be at least 3.2.0 on history DB; otherwise abort
        with _safe_connect(HISTORY_DB) as history_conn:
            if history_conn and not _is_history_at_least_3_2_0(history_conn):
                logger.error(
                    "Instance is not at schema version 3.2.0. Please upgrade to 3.2.0 before applying 3.2.1."
                )
                raise RuntimeError(
                    "Instance is not at schema version 3.2.0. Please upgrade to 3.2.0 before applying 3.2.1."
                )
                _apply_versioned_updates(conn, m311.check_history, m311.update_history)
                _apply_versioned_updates(conn, m312.check_history, m312.update_history)
                conn.commit()

        # Watch playlists DB
        with _safe_connect(PLAYLISTS_DB) as conn:
            if conn:
                _apply_versioned_updates(
                    conn,
                    m306.check_watch_playlists,
                    m306.update_watch_playlists,
                )
                _apply_versioned_updates(
                    conn,
                    m311.check_watch_playlists,
                    m311.update_watch_playlists,
                )
                _apply_versioned_updates(
                    conn,
                    m312.check_watch_playlists,
                    m312.update_watch_playlists,
                )
                _update_watch_playlists_db(conn)
                # Apply 3.2.0 additions (batch progress columns)
                if not m320.check_watch_playlists(conn):
                    m320.update_watch_playlists(conn)
                conn.commit()

        # Watch artists DB
        # Watch artists DB (if exists)
        if ARTISTS_DB.exists():
            with _safe_connect(ARTISTS_DB) as conn:
                if conn:
                    _apply_versioned_updates(
                        conn, m306.check_watch_artists, m306.update_watch_artists
                    )
                    _apply_versioned_updates(
                        conn, m310.check_watch_artists, m310.update_watch_artists
                    )
                    _apply_versioned_updates(
                        conn, m311.check_watch_artists, m311.update_watch_artists
                    )
                    _apply_versioned_updates(
                        conn, m312.check_watch_artists, m312.update_watch_artists
                    )
                    _update_watch_artists_db(conn)
                    if not m320.check_watch_artists(conn):
                        m320.update_watch_artists(conn)
                    conn.commit()

        # Accounts DB
        # Accounts DB (no changes for this migration path)
        with _safe_connect(ACCOUNTS_DB) as conn:
            if conn:
                _apply_versioned_updates(
                    conn, m306.check_accounts, m306.update_accounts
                )
                _apply_versioned_updates(
                    conn, m311.check_accounts, m311.update_accounts
                )
                _apply_versioned_updates(
                    conn, m312.check_accounts, m312.update_accounts
                )
                conn.commit()

    except Exception as e:
        logger.error("Error during migration: %s", e, exc_info=True)
        raise
    else:
        _ensure_creds_filesystem()
        logger.info("Database migrations check completed")
        logger.info("Database migrations check completed (3.2.0 -> 3.2.1 path)")
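For readers following the runner: every migration object exposes paired `check_*`/`update_*` methods, and an update is applied only when its check fails. A minimal self-contained sketch of that gate (the table name and column are illustrative, not the project's schema):

```python
import sqlite3

class ExampleMigration:
    """Illustrative check/update pair in the style of the MigrationV3_* classes."""

    def check(self, conn: sqlite3.Connection) -> bool:
        # Schema is current when the expected column already exists
        cols = {row[1] for row in conn.execute("PRAGMA table_info(items)")}
        return "batch_next_offset" in cols

    def update(self, conn: sqlite3.Connection) -> None:
        # Only runs when check() reported the column missing,
        # so the ALTER TABLE never fires twice
        conn.execute("ALTER TABLE items ADD COLUMN batch_next_offset INTEGER DEFAULT 0")

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE items (id INTEGER PRIMARY KEY)")
m = ExampleMigration()
if not m.check(conn):  # same gate the runner uses
    m.update(conn)
assert m.check(conn)
```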
@@ -1,201 +0,0 @@
import sqlite3


class MigrationV3_0_6:
    HISTORY_SQL = """
    CREATE TABLE IF NOT EXISTS download_history (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        download_type TEXT NOT NULL,
        title TEXT NOT NULL,
        artists TEXT,
        timestamp REAL NOT NULL,
        status TEXT NOT NULL,
        service TEXT,
        quality_format TEXT,
        quality_bitrate TEXT,
        total_tracks INTEGER,
        successful_tracks INTEGER,
        failed_tracks INTEGER,
        skipped_tracks INTEGER,
        children_table TEXT,
        task_id TEXT,
        external_ids TEXT,
        metadata TEXT,
        release_date TEXT,
        genres TEXT,
        images TEXT,
        owner TEXT,
        album_type TEXT,
        duration_total_ms INTEGER,
        explicit BOOLEAN
    );
    CREATE INDEX IF NOT EXISTS idx_download_history_timestamp ON download_history(timestamp);
    CREATE INDEX IF NOT EXISTS idx_download_history_type_status ON download_history(download_type, status);
    CREATE INDEX IF NOT EXISTS idx_download_history_task_id ON download_history(task_id);
    CREATE UNIQUE INDEX IF NOT EXISTS uq_download_history_task_type_ids ON download_history(task_id, download_type, external_ids);
    """

    WATCH_PLAYLISTS_SQL = """
    CREATE TABLE IF NOT EXISTS watched_playlists (
        spotify_id TEXT PRIMARY KEY,
        name TEXT,
        owner_id TEXT,
        owner_name TEXT,
        total_tracks INTEGER,
        link TEXT,
        snapshot_id TEXT,
        last_checked INTEGER,
        added_at INTEGER,
        is_active INTEGER DEFAULT 1
    );
    """

    WATCH_ARTISTS_SQL = """
    CREATE TABLE IF NOT EXISTS watched_artists (
        spotify_id TEXT PRIMARY KEY,
        name TEXT,
        link TEXT,
        total_albums_on_spotify INTEGER,
        last_checked INTEGER,
        added_at INTEGER,
        is_active INTEGER DEFAULT 1,
        genres TEXT,
        popularity INTEGER,
        image_url TEXT
    );
    """

    ACCOUNTS_SPOTIFY_SQL = """
    CREATE TABLE IF NOT EXISTS spotify (
        name TEXT PRIMARY KEY,
        region TEXT,
        created_at REAL,
        updated_at REAL
    );
    """

    ACCOUNTS_DEEZER_SQL = """
    CREATE TABLE IF NOT EXISTS deezer (
        name TEXT PRIMARY KEY,
        arl TEXT,
        region TEXT,
        created_at REAL,
        updated_at REAL
    );
    """

    @staticmethod
    def _table_columns(conn: sqlite3.Connection, table: str) -> set[str]:
        try:
            cur = conn.execute(f"PRAGMA table_info({table})")
            return {row[1] for row in cur.fetchall()}
        except Exception:
            return set()

    # --- Checks ---
    def check_history(self, conn: sqlite3.Connection) -> bool:
        cur = conn.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='download_history'"
        )
        if not cur.fetchone():
            return False
        required = {
            "id",
            "download_type",
            "title",
            "artists",
            "timestamp",
            "status",
            "service",
            "quality_format",
            "quality_bitrate",
            "total_tracks",
            "successful_tracks",
            "failed_tracks",
            "skipped_tracks",
            "children_table",
            "task_id",
            "external_ids",
            "metadata",
            "release_date",
            "genres",
            "images",
            "owner",
            "album_type",
            "duration_total_ms",
            "explicit",
        }
        return required.issubset(self._table_columns(conn, "download_history"))

    def check_watch_playlists(self, conn: sqlite3.Connection) -> bool:
        cur = conn.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='watched_playlists'"
        )
        if not cur.fetchone():
            return False
        required = {
            "spotify_id",
            "name",
            "owner_id",
            "owner_name",
            "total_tracks",
            "link",
            "snapshot_id",
            "last_checked",
            "added_at",
            "is_active",
        }
        return required.issubset(self._table_columns(conn, "watched_playlists"))

    def check_watch_artists(self, conn: sqlite3.Connection) -> bool:
        cur = conn.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='watched_artists'"
        )
        if not cur.fetchone():
            return False
        required = {
            "spotify_id",
            "name",
            "link",
            "total_albums_on_spotify",
            "last_checked",
            "added_at",
            "is_active",
            "genres",
            "popularity",
            "image_url",
        }
        return required.issubset(self._table_columns(conn, "watched_artists"))

    def check_accounts(self, conn: sqlite3.Connection) -> bool:
        cur = conn.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='spotify'"
        )
        if not cur.fetchone():
            return False
        if not {"name", "region", "created_at", "updated_at"}.issubset(
            self._table_columns(conn, "spotify")
        ):
            return False
        cur = conn.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name='deezer'"
        )
        if not cur.fetchone():
            return False
        return {"name", "arl", "region", "created_at", "updated_at"}.issubset(
            self._table_columns(conn, "deezer")
        )

    # --- Updates ---
    def update_history(self, conn: sqlite3.Connection) -> None:
        conn.executescript(self.HISTORY_SQL)

    def update_watch_playlists(self, conn: sqlite3.Connection) -> None:
        conn.executescript(self.WATCH_PLAYLISTS_SQL)

    def update_watch_artists(self, conn: sqlite3.Connection) -> None:
        conn.executescript(self.WATCH_ARTISTS_SQL)

    def update_accounts(self, conn: sqlite3.Connection) -> None:
        conn.executescript(self.ACCOUNTS_SPOTIFY_SQL)
        conn.executescript(self.ACCOUNTS_DEEZER_SQL)
@@ -1,88 +0,0 @@
import sqlite3
import logging

logger = logging.getLogger(__name__)


class MigrationV3_1_0:
    ARTIST_ALBUMS_EXPECTED_COLUMNS: dict[str, str] = {
        "album_spotify_id": "TEXT PRIMARY KEY",
        "artist_spotify_id": "TEXT",
        "name": "TEXT",
        "album_group": "TEXT",
        "album_type": "TEXT",
        "release_date": "TEXT",
        "release_date_precision": "TEXT",
        "total_tracks": "INTEGER",
        "link": "TEXT",
        "image_url": "TEXT",
        "added_to_db": "INTEGER",
        "last_seen_on_spotify": "INTEGER",
        "download_task_id": "TEXT",
        "download_status": "INTEGER DEFAULT 0",
        "is_fully_downloaded_managed_by_app": "INTEGER DEFAULT 0",
    }

    def _table_columns(self, conn: sqlite3.Connection, table: str) -> set[str]:
        try:
            cur = conn.execute(f"PRAGMA table_info({table})")
            return {row[1] for row in cur.fetchall()}
        except sqlite3.OperationalError:
            return set()

    def check_watch_artists(self, conn: sqlite3.Connection) -> bool:
        """Checks if the artist-specific tables have the new columns."""
        try:
            cur = conn.execute(
                "SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'artist_%' LIMIT 1"
            )
            first_artist_table = cur.fetchone()

            if not first_artist_table:
                return True  # No artist tables, so no migration needed

            table_name = first_artist_table[0]
            existing_columns = self._table_columns(conn, table_name)
            required_columns = self.ARTIST_ALBUMS_EXPECTED_COLUMNS.keys()

            return set(required_columns).issubset(existing_columns)
        except Exception as e:
            logger.error(f"Error checking artist watch DB schema: {e}")
            return False

    def update_watch_artists(self, conn: sqlite3.Connection) -> None:
        """Updates all artist-specific tables with new columns."""
        try:
            cur = conn.execute(
                "SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'artist_%'"
            )
            artist_tables = cur.fetchall()

            for row in artist_tables:
                table_name = row[0]
                existing_columns = self._table_columns(conn, table_name)

                for col_name, col_type in self.ARTIST_ALBUMS_EXPECTED_COLUMNS.items():
                    if col_name in existing_columns:
                        continue

                    try:
                        # Remove constraints for ADD COLUMN
                        col_type_for_add = (
                            col_type.replace("PRIMARY KEY", "")
                            .replace("AUTOINCREMENT", "")
                            .replace("NOT NULL", "")
                            .strip()
                        )
                        conn.execute(
                            f'ALTER TABLE "{table_name}" ADD COLUMN {col_name} {col_type_for_add}'
                        )
                        logger.info(
                            f"Added column '{col_name}' to table '{table_name}' in artists.db."
                        )
                    except sqlite3.OperationalError as e:
                        logger.warning(
                            f"Could not add column '{col_name}' to table '{table_name}': {e}"
                        )
        except Exception as e:
            logger.error(f"Failed to update artist watch DB: {e}", exc_info=True)
@@ -1,42 +0,0 @@
import sqlite3


class MigrationV3_1_1:
    """
    Dummy migration for version 3.1.1 to 3.1.2.
    No database schema changes were made between these versions.
    This class serves as a placeholder to ensure the migration runner
    is aware of this version and can proceed without errors.
    """

    def check_history(self, conn: sqlite3.Connection) -> bool:
        # No changes, so migration is not needed.
        return True

    def update_history(self, conn: sqlite3.Connection) -> None:
        # No-op
        pass

    def check_watch_artists(self, conn: sqlite3.Connection) -> bool:
        # No changes, so migration is not needed.
        return True

    def update_watch_artists(self, conn: sqlite3.Connection) -> None:
        # No-op
        pass

    def check_watch_playlists(self, conn: sqlite3.Connection) -> bool:
        # No changes, so migration is not needed.
        return True

    def update_watch_playlists(self, conn: sqlite3.Connection) -> None:
        # No-op
        pass

    def check_accounts(self, conn: sqlite3.Connection) -> bool:
        # No changes, so migration is not needed.
        return True

    def update_accounts(self, conn: sqlite3.Connection) -> None:
        # No-op
        pass
@@ -1,103 +0,0 @@
import sqlite3
import logging


logger = logging.getLogger(__name__)


class MigrationV3_1_2:
    """
    Migration for version 3.1.2.
    Ensure history children tables (album_*/playlist_*) include service and quality columns.
    """

    CHILDREN_EXTRA_COLUMNS: dict[str, str] = {
        "service": "TEXT",
        "quality_format": "TEXT",
        "quality_bitrate": "TEXT",
    }

    def _table_columns(self, conn: sqlite3.Connection, table: str) -> set[str]:
        try:
            cur = conn.execute(f"PRAGMA table_info({table})")
            return {row[1] for row in cur.fetchall()}
        except sqlite3.OperationalError:
            return set()

    def _list_children_tables(self, conn: sqlite3.Connection) -> list[str]:
        tables: set[str] = set()
        try:
            cur = conn.execute(
                "SELECT name FROM sqlite_master WHERE type='table' AND (name LIKE 'album_%' OR name LIKE 'playlist_%') AND name != 'download_history'"
            )
            for row in cur.fetchall():
                if row and row[0]:
                    tables.add(row[0])
        except sqlite3.Error as e:
            logger.warning(f"Failed to scan sqlite_master for children tables: {e}")

        try:
            cur = conn.execute(
                "SELECT DISTINCT children_table FROM download_history WHERE children_table IS NOT NULL AND TRIM(children_table) != ''"
            )
            for row in cur.fetchall():
                t = row[0]
                if t:
                    tables.add(t)
        except sqlite3.Error as e:
            logger.warning(f"Failed to scan download_history for children tables: {e}")

        return sorted(tables)

    def check_history(self, conn: sqlite3.Connection) -> bool:
        tables = self._list_children_tables(conn)
        if not tables:
            # Nothing to migrate
            return True
        # If any table is missing any of the extra columns, migration is needed
        for t in tables:
            cols = self._table_columns(conn, t)
            if not set(self.CHILDREN_EXTRA_COLUMNS.keys()).issubset(cols):
                return False
        return True

    def update_history(self, conn: sqlite3.Connection) -> None:
        tables = self._list_children_tables(conn)
        for t in tables:
            existing = self._table_columns(conn, t)
            for col_name, col_type in self.CHILDREN_EXTRA_COLUMNS.items():
                if col_name in existing:
                    continue
                try:
                    conn.execute(f"ALTER TABLE {t} ADD COLUMN {col_name} {col_type}")
                    logger.info(
                        f"Added column '{col_name} {col_type}' to history children table '{t}'."
                    )
                except sqlite3.OperationalError as e:
                    logger.warning(
                        f"Could not add column '{col_name}' to history children table '{t}': {e}"
                    )

    def check_watch_artists(self, conn: sqlite3.Connection) -> bool:
        # No changes for watch artists in 3.1.2
        return True

    def update_watch_artists(self, conn: sqlite3.Connection) -> None:
        # No-op
        pass

    def check_watch_playlists(self, conn: sqlite3.Connection) -> bool:
        # No changes for watch playlists in 3.1.2
        return True

    def update_watch_playlists(self, conn: sqlite3.Connection) -> None:
        # No-op
        pass

    def check_accounts(self, conn: sqlite3.Connection) -> bool:
        # No changes for accounts in 3.1.2
        return True

    def update_accounts(self, conn: sqlite3.Connection) -> None:
        # No-op
        pass
routes/migrations/v3_2_0.py (new file, +100)
@@ -0,0 +1,100 @@
import sqlite3
import logging

logger = logging.getLogger(__name__)


class MigrationV3_2_0:
    """
    Migration for version 3.2.0 (upgrade path 3.2.0 -> 3.2.1).
    - Adds per-item batch progress columns to Watch DBs to support page-by-interval processing.
    - Enforces prerequisite: previous instance version must be 3.1.2 (validated by runner).
    """

    # New columns to add to watched tables
    PLAYLISTS_ADDED_COLUMNS: dict[str, str] = {
        "batch_next_offset": "INTEGER DEFAULT 0",
        "batch_processing_snapshot_id": "TEXT",
    }

    ARTISTS_ADDED_COLUMNS: dict[str, str] = {
        "batch_next_offset": "INTEGER DEFAULT 0",
    }

    # --- No-op for history/accounts in 3.2.1 ---

    def check_history(self, conn: sqlite3.Connection) -> bool:
        return True

    def update_history(self, conn: sqlite3.Connection) -> None:
        pass

    def check_accounts(self, conn: sqlite3.Connection) -> bool:
        return True

    def update_accounts(self, conn: sqlite3.Connection) -> None:
        pass

    # --- Watch: playlists ---

    def check_watch_playlists(self, conn: sqlite3.Connection) -> bool:
        try:
            cur = conn.execute("PRAGMA table_info(watched_playlists)")
            cols = {row[1] for row in cur.fetchall()}
            return set(self.PLAYLISTS_ADDED_COLUMNS.keys()).issubset(cols)
        except sqlite3.OperationalError:
            # Table missing means not ready
            return False

    def update_watch_playlists(self, conn: sqlite3.Connection) -> None:
        # Add new columns if missing
        try:
            cur = conn.execute("PRAGMA table_info(watched_playlists)")
            existing = {row[1] for row in cur.fetchall()}
            for col_name, col_type in self.PLAYLISTS_ADDED_COLUMNS.items():
                if col_name in existing:
                    continue
                try:
                    conn.execute(
                        f"ALTER TABLE watched_playlists ADD COLUMN {col_name} {col_type}"
                    )
                    logger.info(
                        f"Added column '{col_name} {col_type}' to watched_playlists for 3.2.1 batch progress."
                    )
                except sqlite3.OperationalError as e:
                    logger.warning(
                        f"Could not add column '{col_name}' to watched_playlists: {e}"
                    )
        except Exception:
            logger.error("Failed to update watched_playlists for 3.2.1", exc_info=True)

    # --- Watch: artists ---

    def check_watch_artists(self, conn: sqlite3.Connection) -> bool:
        try:
            cur = conn.execute("PRAGMA table_info(watched_artists)")
            cols = {row[1] for row in cur.fetchall()}
            return set(self.ARTISTS_ADDED_COLUMNS.keys()).issubset(cols)
        except sqlite3.OperationalError:
            return False

    def update_watch_artists(self, conn: sqlite3.Connection) -> None:
        try:
            cur = conn.execute("PRAGMA table_info(watched_artists)")
            existing = {row[1] for row in cur.fetchall()}
            for col_name, col_type in self.ARTISTS_ADDED_COLUMNS.items():
                if col_name in existing:
                    continue
                try:
                    conn.execute(
                        f"ALTER TABLE watched_artists ADD COLUMN {col_name} {col_type}"
                    )
                    logger.info(
                        f"Added column '{col_name} {col_type}' to watched_artists for 3.2.1 batch progress."
                    )
                except sqlite3.OperationalError as e:
                    logger.warning(
                        f"Could not add column '{col_name}' to watched_artists: {e}"
                    )
        except Exception:
            logger.error("Failed to update watched_artists for 3.2.1", exc_info=True)
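A quick way to sanity-check the new migration class in isolation — this is a test sketch run against an in-memory database from the repo root, not part of the PR:

```python
import sqlite3

from routes.migrations.v3_2_0 import MigrationV3_2_0  # assumes the repo is on sys.path

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE watched_playlists (spotify_id TEXT PRIMARY KEY)")
conn.execute("CREATE TABLE watched_artists (spotify_id TEXT PRIMARY KEY)")

m = MigrationV3_2_0()
assert not m.check_watch_playlists(conn)  # batch columns missing at first
m.update_watch_playlists(conn)
m.update_watch_artists(conn)
assert m.check_watch_playlists(conn)      # batch columns now present
assert m.check_watch_artists(conn)
m.update_watch_playlists(conn)            # safe to re-run: existing columns are skipped
```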
@@ -1,4 +1,5 @@
import json
from routes.utils.watch.manager import get_watch_config
import logging
from routes.utils.celery_queue_manager import download_queue_manager
from routes.utils.get_info import get_spotify_info
@@ -86,16 +87,16 @@ def get_artist_discography(
    raise


def download_artist_albums(
    url, album_type="album,single,compilation", request_args=None, username=None
):
def download_artist_albums(url, album_type=None, request_args=None, username=None):
    """
    Download albums by an artist, filtered by album types.
    If album_type is not provided, uses the watchedArtistAlbumGroup setting from watch config.

    Args:
        url (str): Spotify artist URL
        album_type (str): Comma-separated list of album types to download
            (album, single, compilation, appears_on)
            If None, uses watchedArtistAlbumGroup setting
        request_args (dict): Original request arguments for tracking
        username (str | None): Username initiating the request, used for per-user separation
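With the new default, callers can simply omit `album_type` and inherit the watch configuration. A hedged usage sketch (the URL and username are placeholders):

```python
# Queue whatever the watchedArtistAlbumGroup config setting allows:
queued, duplicates = download_artist_albums(
    "https://open.spotify.com/artist/PLACEHOLDER_ID",  # placeholder URL
    username="alice",                                  # illustrative username
)
# Note: as written in the hunk below, the config-driven allowed_groups filter
# appears to supersede an explicitly passed album_type string.
print(f"{len(queued)} queued, {len(duplicates)} duplicates")
```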
@@ -118,39 +119,82 @@ def download_artist_albums(
        logger.error(error_msg)
        raise ValueError(error_msg)

    artist_data = get_spotify_info(artist_id, "artist_discography")
    # Get watch config to determine which album groups to download
    watch_config = get_watch_config()
    allowed_groups = [
        g.lower()
        for g in watch_config.get("watchedArtistAlbumGroup", ["album", "single"])
    ]
    logger.info(
        f"Filtering albums by watchedArtistAlbumGroup setting (exact album_group match): {allowed_groups}"
    )

    if not artist_data or "items" not in artist_data:
    # Fetch all artist albums with pagination
    all_artist_albums = []
    offset = 0
    limit = 50  # Spotify API limit for artist albums

    logger.info(f"Fetching all albums for artist ID: {artist_id} with pagination")

    while True:
        logger.debug(
            f"Fetching albums for {artist_id}. Limit: {limit}, Offset: {offset}"
        )
        artist_data_page = get_spotify_info(
            artist_id, "artist_discography", limit=limit, offset=offset
        )

        if not artist_data_page or not isinstance(artist_data_page.get("items"), list):
            logger.warning(
                f"No album items found or invalid format for artist {artist_id} at offset {offset}. Response: {artist_data_page}"
            )
            break

        current_page_albums = artist_data_page.get("items", [])
        if not current_page_albums:
            logger.info(
                f"No more albums on page for artist {artist_id} at offset {offset}. Total fetched so far: {len(all_artist_albums)}."
            )
            break

        logger.debug(
            f"Fetched {len(current_page_albums)} albums on current page for artist {artist_id}."
        )
        all_artist_albums.extend(current_page_albums)

        # Check if Spotify indicates a next page URL
        if artist_data_page.get("next"):
            offset += limit  # Increment offset by the limit used for the request
        else:
            logger.info(
                f"No next page URL for artist {artist_id}. Pagination complete. Total albums fetched: {len(all_artist_albums)}."
            )
            break

    if not all_artist_albums:
        raise ValueError(
            f"Failed to retrieve artist data or no albums found for artist ID {artist_id}"
        )

    allowed_types = [t.strip().lower() for t in album_type.split(",")]
    logger.info(f"Filtering albums by types: {allowed_types}")

    # Filter albums based on the allowed types using album_group field (like in manager.py)
    filtered_albums = []
    for album in artist_data.get("items", []):
        album_type_value = album.get("album_type", "").lower()
    for album in all_artist_albums:
        album_group_value = album.get("album_group", "").lower()
        album_name = album.get("name", "Unknown Album")
        album_id = album.get("id", "Unknown ID")

        if (
            (
                "album" in allowed_types
                and album_type_value == "album"
                and album_group_value == "album"
        # Exact album_group match only (align with watch manager)
        is_matching_group = album_group_value in allowed_groups

        logger.debug(
            f"Album {album_name} ({album_id}): album_group={album_group_value}. Allowed groups: {allowed_groups}. Match: {is_matching_group}."
        )
            or (
                "single" in allowed_types
                and album_type_value == "single"
                and album_group_value == "single"
            )
            or ("compilation" in allowed_types and album_type_value == "compilation")
            or ("appears_on" in allowed_types and album_group_value == "appears_on")
        ):

        if is_matching_group:
            filtered_albums.append(album)

    if not filtered_albums:
        logger.warning(f"No albums match the specified types: {album_type}")
        logger.warning(f"No albums match the specified groups: {allowed_groups}")
        return [], []

    successfully_queued_albums = []
@@ -168,7 +212,7 @@ def download_artist_albums(

        if not album_url:
            logger.warning(
                f"Skipping album '{album_name}' because it has no Spotify URL."
                f"Skipping album {album_name} because it has no Spotify URL."
            )
            continue

@@ -211,6 +255,6 @@ def download_artist_albums(
    )

    logger.info(
        f"Artist album processing: {len(successfully_queued_albums)} queued, {len(duplicate_albums)} duplicates found."
        f"Artist album processing: {len(successfully_queued_albums)} queued, {len(duplicate_albums)} duplicates found from {len(filtered_albums)} matching albums out of {len(all_artist_albums)} total albums."
    )
    return successfully_queued_albums, duplicate_albums
@@ -25,6 +25,9 @@ EXPECTED_WATCHED_PLAYLISTS_COLUMNS = {
    "last_checked": "INTEGER",
    "added_at": "INTEGER",
    "is_active": "INTEGER DEFAULT 1",
    # New: batch progress for per-interval page fetching
    "batch_next_offset": "INTEGER DEFAULT 0",
    "batch_processing_snapshot_id": "TEXT",
}

EXPECTED_PLAYLIST_TRACKS_COLUMNS = {
@@ -55,6 +58,8 @@ EXPECTED_WATCHED_ARTISTS_COLUMNS = {
    "genres": "TEXT",  # Comma-separated
    "popularity": "INTEGER",
    "image_url": "TEXT",
    # New: batch progress for per-interval page fetching
    "batch_next_offset": "INTEGER DEFAULT 0",
}

EXPECTED_ARTIST_ALBUMS_COLUMNS = {
@@ -439,6 +444,61 @@ def update_playlist_snapshot(
    )


# --- New: per-playlist batch progress helpers ---


def get_playlist_batch_progress(playlist_spotify_id: str) -> tuple[int, str | None]:
    """Returns (batch_next_offset, batch_processing_snapshot_id) for a watched playlist."""
    try:
        with _get_playlists_db_connection() as conn:
            cursor = conn.cursor()
            cursor.execute(
                "SELECT batch_next_offset, batch_processing_snapshot_id FROM watched_playlists WHERE spotify_id = ?",
                (playlist_spotify_id,),
            )
            row = cursor.fetchone()
            if not row:
                return 0, None
            next_offset = (
                row["batch_next_offset"] if "batch_next_offset" in row.keys() else 0
            )
            processing_snapshot = (
                row["batch_processing_snapshot_id"]
                if "batch_processing_snapshot_id" in row.keys()
                else None
            )
            return int(next_offset or 0), processing_snapshot
    except sqlite3.Error as e:
        logger.error(
            f"Error retrieving batch progress for playlist {playlist_spotify_id}: {e}",
            exc_info=True,
        )
        return 0, None


def set_playlist_batch_progress(
    playlist_spotify_id: str, next_offset: int, processing_snapshot_id: str | None
) -> None:
    """Updates batch_next_offset and batch_processing_snapshot_id for a watched playlist."""
    try:
        with _get_playlists_db_connection() as conn:
            cursor = conn.cursor()
            cursor.execute(
                """
                UPDATE watched_playlists
                SET batch_next_offset = ?, batch_processing_snapshot_id = ?
                WHERE spotify_id = ?
                """,
                (int(next_offset or 0), processing_snapshot_id, playlist_spotify_id),
            )
            conn.commit()
    except sqlite3.Error as e:
        logger.error(
            f"Error updating batch progress for playlist {playlist_spotify_id}: {e}",
            exc_info=True,
        )
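These two helpers implement a resumable page cursor: each run reads the saved offset, processes one page, and writes back either the advanced offset or 0 when the scan wraps. A hedged sketch of the intended call pattern; `fetch_page` and `handle_items` are hypothetical stand-ins for the manager's fetching and queueing logic:

```python
def process_one_page(playlist_id: str, snapshot_id: str, total_tracks: int, page_size: int = 50):
    # Resume where the previous run stopped
    offset, processing_snapshot = get_playlist_batch_progress(playlist_id)
    if processing_snapshot != snapshot_id:
        offset = 0  # a new snapshot restarts the scan
        set_playlist_batch_progress(playlist_id, 0, snapshot_id)

    items = fetch_page(playlist_id, limit=page_size, offset=offset)  # hypothetical fetcher
    handle_items(items)  # hypothetical per-page work

    next_offset = offset + len(items)
    if items and next_offset < total_tracks:
        set_playlist_batch_progress(playlist_id, next_offset, snapshot_id)  # continue next run
    else:
        set_playlist_batch_progress(playlist_id, 0, None)  # scan finished; reset cursor
```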
def get_playlist_track_ids_from_db(playlist_spotify_id: str):
    """Retrieves all track Spotify IDs from a specific playlist's tracks table in playlists.db."""
    table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
@@ -773,7 +833,7 @@ def add_specific_tracks_to_playlist_table(
def remove_specific_tracks_from_playlist_table(
    playlist_spotify_id: str, track_spotify_ids: list
):
    """Removes specific tracks from the playlist's local track table."""
    """Removes specific tracks from the playlist's local DB table."""
    table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
    if not track_spotify_ids:
        return 0
@@ -799,7 +859,7 @@ def remove_specific_tracks_from_playlist_table(
        conn.commit()
        deleted_count = cursor.rowcount
        logger.info(
            f"Manually removed {deleted_count} tracks from DB for playlist {playlist_spotify_id}."
            f"Successfully removed {deleted_count} tracks locally for playlist {playlist_spotify_id}."
        )
        return deleted_count
    except sqlite3.Error as e:
@@ -1164,6 +1224,53 @@ def update_artist_metadata_after_check(
    )


# --- New: per-artist batch progress helpers ---


def get_artist_batch_next_offset(artist_spotify_id: str) -> int:
    try:
        with _get_artists_db_connection() as conn:
            cursor = conn.cursor()
            cursor.execute(
                "SELECT batch_next_offset FROM watched_artists WHERE spotify_id = ?",
                (artist_spotify_id,),
            )
            row = cursor.fetchone()
            if not row:
                return 0
            return (
                int(row["batch_next_offset"])
                if "batch_next_offset" in row.keys()
                else 0
            )
    except sqlite3.Error as e:
        logger.error(
            f"Error retrieving batch_next_offset for artist {artist_spotify_id}: {e}",
            exc_info=True,
        )
        return 0


def set_artist_batch_next_offset(artist_spotify_id: str, next_offset: int) -> None:
    try:
        with _get_artists_db_connection() as conn:
            cursor = conn.cursor()
            cursor.execute(
                """
                UPDATE watched_artists
                SET batch_next_offset = ?
                WHERE spotify_id = ?
                """,
                (int(next_offset or 0), artist_spotify_id),
            )
            conn.commit()
    except sqlite3.Error as e:
        logger.error(
            f"Error updating batch_next_offset for artist {artist_spotify_id}: {e}",
            exc_info=True,
        )


def get_artist_album_ids_from_db(artist_spotify_id: str):
    """Retrieves all album Spotify IDs from a specific artist's albums table in artists.db."""
    table_name = f"artist_{artist_spotify_id.replace('-', '_')}"
@@ -14,7 +14,6 @@ from routes.utils.watch.db import (
    get_playlist_total_tracks_from_db,
    add_tracks_to_playlist_db,
    update_playlist_snapshot,
    mark_tracks_as_not_present_in_spotify,
    update_all_existing_tables_schema,
    ensure_playlist_table_schema,
    # Artist watch DB functions
@@ -22,6 +21,11 @@ from routes.utils.watch.db import (
    get_watched_artist,
    get_artist_album_ids_from_db,
    update_artist_metadata_after_check,  # Renamed from update_artist_metadata
    # New batch progress helpers
    get_playlist_batch_progress,
    set_playlist_batch_progress,
    get_artist_batch_next_offset,
    set_artist_batch_next_offset,
)
from routes.utils.get_info import (
    get_spotify_info,
@@ -47,8 +51,35 @@ DEFAULT_WATCH_CONFIG = {
    "delayBetweenPlaylistsSeconds": 2,
    "delayBetweenArtistsSeconds": 5,
    "useSnapshotIdChecking": True,
    "maxItemsPerRun": 50,
}

# Round-robin index for one-item-per-interval scheduling
_round_robin_index = 0

# Per-item locks to ensure only one run processes a given item at a time
_playlist_locks: Dict[str, threading.RLock] = {}
_artist_locks: Dict[str, threading.RLock] = {}
_locks_guard = threading.RLock()


def _get_playlist_lock(playlist_spotify_id: str) -> threading.RLock:
    with _locks_guard:
        lock = _playlist_locks.get(playlist_spotify_id)
        if lock is None:
            lock = threading.RLock()
            _playlist_locks[playlist_spotify_id] = lock
        return lock


def _get_artist_lock(artist_spotify_id: str) -> threading.RLock:
    with _locks_guard:
        lock = _artist_locks.get(artist_spotify_id)
        if lock is None:
            lock = threading.RLock()
            _artist_locks[artist_spotify_id] = lock
        return lock
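The guard lock serializes creation of the per-item locks, so concurrent runs always receive the same `RLock` for a given ID. A self-contained sketch of the same pattern:

```python
import threading
from typing import Dict

_locks: Dict[str, threading.RLock] = {}
_guard = threading.RLock()

def get_lock(key: str) -> threading.RLock:
    # _guard protects the dict; the returned lock protects the item itself
    with _guard:
        lock = _locks.get(key)
        if lock is None:
            lock = threading.RLock()
            _locks[key] = lock
        return lock

def worker(key: str) -> None:
    with get_lock(key):  # only one thread processes this key at a time
        print(f"processing {key}")

threads = [threading.Thread(target=worker, args=("playlist-42",)) for _ in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()
```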
def get_watch_config():
    """Loads the watch configuration from main.json's 'watch' key (camelCase).
@@ -84,7 +115,8 @@ def get_watch_config():
        "delay_between_playlists_seconds": "delayBetweenPlaylistsSeconds",
        "delay_between_artists_seconds": "delayBetweenArtistsSeconds",
        "use_snapshot_id_checking": "useSnapshotIdChecking",
        "max_tracks_per_run": "maxTracksPerRun",
        "max_tracks_per_run": "maxItemsPerRun",
        "max_items_per_run": "maxItemsPerRun",
    }
    migrated_watch = {}
    for k, v in legacy_watch.items():
@@ -108,7 +140,8 @@ def get_watch_config():
        "delay_between_playlists_seconds": "delayBetweenPlaylistsSeconds",
        "delay_between_artists_seconds": "delayBetweenArtistsSeconds",
        "use_snapshot_id_checking": "useSnapshotIdChecking",
        "max_tracks_per_run": "maxTracksPerRun",
        "max_tracks_per_run": "maxItemsPerRun",
        "max_items_per_run": "maxItemsPerRun",
    }
    migrated = False
    for legacy_key, camel_key in legacy_to_camel.items():
@@ -116,11 +149,30 @@ def get_watch_config():
            watch_cfg[camel_key] = watch_cfg.pop(legacy_key)
            migrated = True

    # Additional migration: if maxTracksPerRun exists but maxItemsPerRun does not, promote it
    if "maxTracksPerRun" in watch_cfg and "maxItemsPerRun" not in watch_cfg:
        watch_cfg["maxItemsPerRun"] = watch_cfg.get("maxTracksPerRun")
        migrated = True

    # Ensure defaults
    for k, v in DEFAULT_WATCH_CONFIG.items():
        if k not in watch_cfg:
            watch_cfg[k] = v

    # Enforce range for maxItemsPerRun (1..50)
    try:
        current_value = int(
            watch_cfg.get("maxItemsPerRun", DEFAULT_WATCH_CONFIG["maxItemsPerRun"])
        )
    except Exception:
        current_value = DEFAULT_WATCH_CONFIG["maxItemsPerRun"]
    clamped_value = (
        1 if current_value < 1 else (50 if current_value > 50 else current_value)
    )
    if clamped_value != watch_cfg.get("maxItemsPerRun"):
        watch_cfg["maxItemsPerRun"] = clamped_value
        migrated = True

    if migrated or legacy_file_found:
        # Persist migration back to main.json
        main_cfg["watch"] = watch_cfg
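The net effect of the legacy-key maps plus the clamp can be summarized in a few lines. A condensed sketch (not the project's function) showing how a legacy `max_tracks_per_run: 500` becomes `maxItemsPerRun: 50`:

```python
def normalize_watch_cfg(cfg: dict) -> dict:
    # Rename any legacy key to the new camelCase key, dropping the old spelling
    for legacy in ("max_tracks_per_run", "max_items_per_run", "maxTracksPerRun"):
        if legacy in cfg and "maxItemsPerRun" not in cfg:
            cfg["maxItemsPerRun"] = cfg.pop(legacy)
        else:
            cfg.pop(legacy, None)
    # Clamp to Spotify's page-size ceiling of 50
    try:
        value = int(cfg.get("maxItemsPerRun", 50))
    except (TypeError, ValueError):
        value = 50
    cfg["maxItemsPerRun"] = min(50, max(1, value))
    return cfg

assert normalize_watch_cfg({"max_tracks_per_run": 500}) == {"maxItemsPerRun": 50}
assert normalize_watch_cfg({"maxItemsPerRun": 0}) == {"maxItemsPerRun": 1}
```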
@@ -293,10 +345,15 @@ def find_tracks_in_playlist(
    found_tracks = []
    not_found_tracks = tracks_to_find.copy()
    offset = 0
    limit = 100
    # Use configured max items per run for pagination (Spotify max 50)
    try:
        cfg = get_watch_config()
        limit = max(1, min(int(cfg.get("maxItemsPerRun", 50)), 50))
    except Exception:
        limit = 50

    logger.info(
        f"Searching for {len(tracks_to_find)} tracks in playlist {playlist_spotify_id} starting from offset {offset}"
        f"Searching for {len(tracks_to_find)} tracks in playlist {playlist_spotify_id} starting from offset {offset} with limit {limit}"
    )

    while not_found_tracks and offset < 10000:  # Safety limit
@@ -349,6 +406,7 @@
def check_watched_playlists(specific_playlist_id: str = None):
    """Checks watched playlists for new tracks and queues downloads.
    If specific_playlist_id is provided, only that playlist is checked.
    Processes at most one batch per run (offset advanced between runs) to avoid rate limits.
    """
    logger.info(
        f"Playlist Watch Manager: Starting check. Specific playlist: {specific_playlist_id or 'All'}"
@@ -360,6 +418,11 @@ def check_watched_playlists(specific_playlist_id: str = None):
    base_dir_fmt = formatting_cfg.get("customDirFormat", "%ar_album%/%album%")
    base_track_fmt = formatting_cfg.get("customTrackFormat", "%tracknum%. %music%")
    pad_tracks = formatting_cfg.get("tracknumPadding", True)
    # Determine pagination limit for this run
    try:
        batch_limit = max(1, min(int(config.get("maxItemsPerRun", 50)), 50))
    except Exception:
        batch_limit = 50

    if specific_playlist_id:
        playlist_obj = get_watched_playlist(specific_playlist_id)
@@ -379,6 +442,14 @@ def check_watched_playlists(specific_playlist_id: str = None):
    for playlist_in_db in watched_playlists_to_check:
        playlist_spotify_id = playlist_in_db["spotify_id"]
        playlist_name = playlist_in_db["name"]
        playlist_lock = _get_playlist_lock(playlist_spotify_id)
        logger.debug(
            f"Playlist Watch Manager: Waiting for lock on playlist {playlist_spotify_id}..."
        )
        with playlist_lock:
            logger.debug(
                f"Playlist Watch Manager: Acquired lock for playlist {playlist_spotify_id}."
            )
            logger.info(
                f"Playlist Watch Manager: Checking playlist '{playlist_name}' ({playlist_spotify_id})..."
            )
@@ -406,154 +477,118 @@ def check_watched_playlists(specific_playlist_id: str = None):
|
||||
playlist_changed = has_playlist_changed(
|
||||
playlist_spotify_id, api_snapshot_id
|
||||
)
|
||||
else:
|
||||
playlist_changed = True # Force full check
|
||||
|
||||
if not playlist_changed:
|
||||
# Determine if we need a full multi-run sync
|
||||
needs_full_sync = False
|
||||
if playlist_changed:
|
||||
needs_full_sync = True
|
||||
else:
|
||||
# Even if playlist snapshot_id hasn't changed, check if individual tracks need sync
|
||||
needs_sync, tracks_to_find = needs_track_sync(
|
||||
playlist_spotify_id, api_snapshot_id, api_total_tracks
|
||||
)
|
||||
|
||||
if not needs_sync:
|
||||
logger.info(
|
||||
f"Playlist Watch Manager: Playlist '{playlist_name}' ({playlist_spotify_id}) has not changed since last check (snapshot_id: {api_snapshot_id}). Skipping detailed check."
|
||||
f"Playlist Watch Manager: Playlist '{playlist_name}' ({playlist_spotify_id}) unchanged (snapshot {api_snapshot_id}). Skipping."
|
||||
)
|
||||
continue
|
||||
else:
|
||||
if not tracks_to_find:
|
||||
# Empty tracks_to_find means full sync is needed (track count mismatch detected)
|
||||
logger.info(
|
||||
f"Playlist Watch Manager: Playlist '{playlist_name}' snapshot_id unchanged, but full sync needed due to track count mismatch. Proceeding with full check."
|
||||
)
|
||||
# Continue to full sync below
|
||||
# Track count mismatch → treat as full sync
|
||||
needs_full_sync = True
|
||||
else:
|
||||
# Targeted sync required. To avoid rate limits, process only one page this run.
|
||||
logger.info(
|
||||
f"Playlist Watch Manager: Playlist '{playlist_name}' snapshot_id unchanged, but {len(tracks_to_find)} tracks need sync. Proceeding with targeted check."
|
||||
f"Playlist Watch Manager: Targeted sync for '{playlist_name}' with {len(tracks_to_find)} tracks needing update. Processing one page (limit={batch_limit})."
|
||||
)
|
||||
# Use targeted track search instead of full fetch
|
||||
found_tracks, not_found_tracks = find_tracks_in_playlist(
|
||||
playlist_spotify_id, tracks_to_find, api_snapshot_id
|
||||
# Use one-page scan to try find some of the tracks
|
||||
progress_offset, _ = get_playlist_batch_progress(
|
||||
playlist_spotify_id
|
||||
)
|
||||
|
||||
# Update found tracks with new snapshot_id
|
||||
tracks_batch = get_playlist_tracks(
|
||||
playlist_spotify_id,
|
||||
limit=batch_limit,
|
||||
offset=progress_offset,
|
||||
)
|
||||
batch_items = (
|
||||
tracks_batch.get("items", []) if tracks_batch else []
|
||||
)
|
||||
found_tracks = []
|
||||
remaining_to_find = set(tracks_to_find)
|
||||
for item in batch_items:
|
||||
track = item.get("track")
|
||||
if (
|
||||
track
|
||||
and track.get("id")
|
||||
and track["id"] in remaining_to_find
|
||||
and not track.get("is_local")
|
||||
):
|
||||
found_tracks.append(item)
|
||||
remaining_to_find.remove(track["id"])
|
||||
if found_tracks:
|
||||
add_tracks_to_playlist_db(
|
||||
playlist_spotify_id, found_tracks, api_snapshot_id
|
||||
)
|
||||
|
||||
# Mark not found tracks as removed
|
||||
if not_found_tracks:
|
||||
logger.info(
|
||||
f"Playlist Watch Manager: {len(not_found_tracks)} tracks not found in playlist '{playlist_name}'. Marking as removed."
|
||||
)
|
||||
mark_tracks_as_not_present_in_spotify(
|
||||
playlist_spotify_id, not_found_tracks
|
||||
)
|
||||
|
||||
# Update the playlist's m3u file after tracks are removed
|
||||
try:
|
||||
logger.info(
|
||||
f"Updating m3u file for playlist '{playlist_name}' after removing {len(not_found_tracks)} tracks."
|
||||
)
|
||||
update_playlist_m3u_file(playlist_spotify_id)
|
||||
except Exception as m3u_update_err:
|
||||
logger.error(
|
||||
f"Failed to update m3u file for playlist '{playlist_name}' after marking tracks as removed: {m3u_update_err}",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
# Update playlist snapshot and continue to next playlist
|
||||
update_playlist_snapshot(
|
||||
playlist_spotify_id, api_snapshot_id, api_total_tracks
|
||||
# Advance offset for next run
|
||||
next_offset = progress_offset + len(batch_items)
|
||||
if batch_items and next_offset < api_total_tracks:
|
||||
set_playlist_batch_progress(
|
||||
playlist_spotify_id, next_offset, None
|
||||
)
|
||||
logger.info(
|
||||
f"Playlist Watch Manager: Finished targeted sync for playlist '{playlist_name}'. Snapshot ID updated to {api_snapshot_id}."
|
||||
f"Playlist Watch Manager: Targeted sync processed page (offset {progress_offset}, size {len(batch_items)}). Next offset set to {next_offset}."
|
||||
)
|
||||
else:
|
||||
# End of scan cycle for targeted mode; reset progress cursor
|
||||
set_playlist_batch_progress(
|
||||
playlist_spotify_id, 0, None
|
||||
)
|
||||
logger.info(
|
||||
"Playlist Watch Manager: Targeted sync reached end of playlist. Resetting scan offset to 0."
|
||||
)
|
||||
# Do not update playlist snapshot here; only when full sync finishes
|
||||
continue
else:
logger.info(
f"Playlist Watch Manager: Playlist '{playlist_name}' has changed. New snapshot_id: {api_snapshot_id}. Proceeding with full check."

if needs_full_sync:
# Multi-run full sync: process only one batch per run
progress_offset, processing_snapshot = get_playlist_batch_progress(
playlist_spotify_id
)
# If processing a new snapshot or no processing snapshot recorded, start from offset 0
if (
not processing_snapshot
or processing_snapshot != api_snapshot_id
or progress_offset >= api_total_tracks
):
progress_offset = 0
set_playlist_batch_progress(
playlist_spotify_id, 0, api_snapshot_id
)
else:
logger.info(
f"Playlist Watch Manager: Snapshot checking disabled. Proceeding with full check for playlist '{playlist_name}'."
f"Playlist Watch Manager: Starting/Resetting full sync for '{playlist_name}' snapshot {api_snapshot_id}."
)
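The resume-or-restart decision above can be isolated: resume only when the stored processing snapshot matches the snapshot being synced and the stored offset is still in range. A small sketch of the same check, under those assumptions:

```python
def resolve_start_offset(
    stored_offset: int,
    stored_snapshot: str | None,
    api_snapshot: str,
    total_tracks: int,
) -> int:
    """Resume a multi-run sync only if the stored cursor is still valid."""
    if stored_snapshot != api_snapshot or stored_offset >= total_tracks:
        return 0  # snapshot changed or cursor ran past the end: restart
    return stored_offset
```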

# Fetch all tracks using the optimized function
# This happens when:
# 1. Playlist snapshot_id has changed (full sync needed)
# 2. Snapshot checking is disabled (full sync always)
# 3. Database is empty but API has tracks (full sync needed)
logger.info(
f"Playlist Watch Manager: Fetching all tracks for playlist '{playlist_name}' ({playlist_spotify_id}) with {api_total_tracks} total tracks."
f"Playlist Watch Manager: Fetching one batch (limit={batch_limit}, offset={progress_offset}) for playlist '{playlist_name}'."
)

all_api_track_items = []
offset = 0
limit = 100 # Use maximum batch size for efficiency

while offset < api_total_tracks:
try:
# Use the optimized get_playlist_tracks function
tracks_batch = get_playlist_tracks(
playlist_spotify_id, limit=limit, offset=offset
playlist_spotify_id, limit=batch_limit, offset=progress_offset
)
batch_items = tracks_batch.get("items", []) if tracks_batch else []

if not tracks_batch or "items" not in tracks_batch:
logger.warning(
f"Playlist Watch Manager: No tracks returned for playlist {playlist_spotify_id} at offset {offset}"
)
break

batch_items = tracks_batch.get("items", [])
if not batch_items:
break

all_api_track_items.extend(batch_items)
offset += len(batch_items)

# Add small delay between batches to be respectful to API
if offset < api_total_tracks:
time.sleep(0.1)

except Exception as e:
logger.error(
f"Playlist Watch Manager: Error fetching tracks batch for playlist {playlist_spotify_id} at offset {offset}: {e}"
)
break
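Both the old fetch-everything loop and the one-batch-per-run variant replacing it follow standard limit/offset pagination: request `limit` items at `offset`, advance by the number of items actually returned, and stop on a short or empty page. A generic sketch of that loop (the `fetch_page` callable is a placeholder for whatever client function performs the request):

```python
import time
from typing import Any, Callable

def fetch_all(
    fetch_page: Callable[[int, int], dict[str, Any]],
    total: int,
    limit: int = 100,
    delay_s: float = 0.1,
) -> list[dict[str, Any]]:
    """Collect every item from a limit/offset paginated endpoint."""
    items: list[dict[str, Any]] = []
    offset = 0
    while offset < total:
        page = fetch_page(limit, offset) or {}
        batch = page.get("items", [])
        if not batch:
            break  # short/empty page: nothing more to fetch
        items.extend(batch)
        offset += len(batch)  # advance by what was actually returned
        if offset < total:
            time.sleep(delay_s)  # stay polite to the API
    return items
```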

current_api_track_ids = set()
api_track_id_to_item_map = {}
api_track_position_map: dict[str, int] = {}
# Build maps for quick lookup and position within the playlist (1-based)
for idx, item in enumerate(
all_api_track_items, start=1
): # Use overall playlist index for numbering
track = item.get("track")
if track and track.get("id") and not track.get("is_local"):
track_id = track["id"]
current_api_track_ids.add(track_id)
api_track_id_to_item_map[track_id] = item
api_track_position_map[track_id] = idx

# Build quick lookup for new tracks vs DB
db_track_ids = get_playlist_track_ids_from_db(playlist_spotify_id)

new_track_ids_for_download = current_api_track_ids - db_track_ids
queued_for_download_count = 0
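The new/removed detection above is plain set arithmetic over track IDs: anything in the API set but not the DB set is new, and anything in the DB set but not the API set was removed. A compact illustration with placeholder IDs:

```python
api_ids = {"t1", "t2", "t3"}   # IDs currently reported by the Spotify API
db_ids = {"t2", "t3", "t4"}    # IDs already recorded locally

new_ids = api_ids - db_ids        # {"t1"} -> queue for download
removed_ids = db_ids - api_ids    # {"t4"} -> mark as no longer present
still_present = api_ids & db_ids  # {"t2", "t3"} -> refresh last-seen metadata
```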
if new_track_ids_for_download:
logger.info(
f"Playlist Watch Manager: Found {len(new_track_ids_for_download)} new tracks for playlist '{playlist_name}' to download."
)
for track_id in new_track_ids_for_download:
api_item = api_track_id_to_item_map.get(track_id)
if not api_item or not api_item.get("track"):
logger.warning(
f"Playlist Watch Manager: Missing track details in API map for new track_id {track_id} in playlist {playlist_spotify_id}. Cannot queue."
)
for item in batch_items:
track = item.get("track")
if not track or not track.get("id") or track.get("is_local"):
continue

track_to_queue = api_item["track"]
# Compute per-track formatting overrides for playlist placeholders
position_in_playlist = api_track_position_map.get(track_id)
track_id = track["id"]
if track_id not in db_track_ids:
# Compute per-track formatting overrides
position_in_playlist = None # Unknown without full context; use None so %playlistnum% resolves to '' or basic padding
custom_dir_format, custom_track_format = (
_apply_playlist_placeholders(
base_dir_fmt,
@@ -564,15 +599,14 @@ def check_watched_playlists(specific_playlist_id: str = None):
pad_tracks,
)
)

task_payload = {
"download_type": "track",
"url": construct_spotify_url(track_id, "track"),
"name": track_to_queue.get("name", "Unknown Track"),
"name": track.get("name", "Unknown Track"),
"artist": ", ".join(
[
a["name"]
for a in track_to_queue.get("artists", [])
for a in track.get("artists", [])
if a.get("name")
]
),
@@ -581,9 +615,8 @@ def check_watched_playlists(specific_playlist_id: str = None):
"playlist_id": playlist_spotify_id,
"playlist_name": playlist_name,
"track_spotify_id": track_id,
"track_item_for_db": api_item, # Pass full API item for DB update on completion
"track_item_for_db": item,
},
# Override formats so %playlist% and %playlistnum% resolve now per track
"custom_dir_format": custom_dir_format,
"custom_track_format": custom_track_format,
}
@@ -591,80 +624,63 @@ def check_watched_playlists(specific_playlist_id: str = None):
task_id_or_none = download_queue_manager.add_task(
task_payload, from_watch_job=True
)
if task_id_or_none: # Task was newly queued
logger.info(
f"Playlist Watch Manager: Queued download task {task_id_or_none} for new track {track_id} ('{track_to_queue.get('name')}') from playlist '{playlist_name}'."
)
if task_id_or_none:
queued_for_download_count += 1
# If task_id_or_none is None, it was a duplicate and not re-queued, Celery manager handles logging.
except Exception as e:
logger.error(
f"Playlist Watch Manager: Failed to queue download for new track {track_id} from playlist '{playlist_name}': {e}",
f"Playlist Watch Manager: Failed to queue download for track {track_id} from playlist '{playlist_name}': {e}",
exc_info=True,
)
logger.info(
f"Playlist Watch Manager: Attempted to queue {queued_for_download_count} new tracks for playlist '{playlist_name}'."
)
else:
logger.info(
f"Playlist Watch Manager: No new tracks to download for playlist '{playlist_name}'."
)

# Update DB for tracks that are still present in API (e.g. update 'last_seen_in_spotify')
# add_tracks_to_playlist_db handles INSERT OR REPLACE, updating existing entries.
# We should pass all current API tracks to ensure their `last_seen_in_spotify`, `is_present_in_spotify`, and `snapshot_id` are updated.
if (
all_api_track_items
): # If there are any tracks in the API for this playlist
logger.info(
f"Playlist Watch Manager: Refreshing {len(all_api_track_items)} tracks from API in local DB for playlist '{playlist_name}'."
)
# Refresh/mark present for items in this batch
if batch_items:
add_tracks_to_playlist_db(
playlist_spotify_id, all_api_track_items, api_snapshot_id
playlist_spotify_id, batch_items, api_snapshot_id
)

removed_db_ids = db_track_ids - current_api_track_ids
if removed_db_ids:
# Advance or finalize progress
next_offset = progress_offset + len(batch_items)
if batch_items and next_offset < api_total_tracks:
set_playlist_batch_progress(
playlist_spotify_id, next_offset, api_snapshot_id
)
logger.info(
f"Playlist Watch Manager: {len(removed_db_ids)} tracks removed from Spotify playlist '{playlist_name}'. Marking in DB."
f"Playlist Watch Manager: Processed batch size {len(batch_items)} at offset {progress_offset}. Next offset {next_offset}."
)
mark_tracks_as_not_present_in_spotify(
playlist_spotify_id, list(removed_db_ids)
# Do not update snapshot yet; continue next run
else:
# Finished this snapshot's full sync
set_playlist_batch_progress(playlist_spotify_id, 0, None)
update_playlist_snapshot(
playlist_spotify_id, api_snapshot_id, api_total_tracks
)

# Update the playlist's m3u file after any changes (new tracks queued or tracks removed)
if new_track_ids_for_download or removed_db_ids:
logger.info(
f"Playlist Watch Manager: Full sync completed for '{playlist_name}'. Snapshot updated to {api_snapshot_id}."
)
# Optionally update m3u at the end
try:
logger.info(
f"Updating m3u file for playlist '{playlist_name}' after playlist changes."
)
update_playlist_m3u_file(playlist_spotify_id)
except Exception as m3u_update_err:
logger.error(
f"Failed to update m3u file for playlist '{playlist_name}' after playlist changes: {m3u_update_err}",
f"Failed to update m3u file for playlist '{playlist_name}' after full sync: {m3u_update_err}",
exc_info=True,
)

update_playlist_snapshot(
playlist_spotify_id, api_snapshot_id, api_total_tracks
) # api_total_tracks from initial fetch
logger.info(
f"Playlist Watch Manager: Finished checking playlist '{playlist_name}'. Snapshot ID updated to {api_snapshot_id}. API Total Tracks: {api_total_tracks}. Queued {queued_for_download_count} new tracks."
)

except Exception as e:
logger.error(
f"Playlist Watch Manager: Error processing playlist {playlist_spotify_id}: {e}",
exc_info=True,
)

# Only sleep between items when running a batch (no specific ID)
if not specific_playlist_id:
time.sleep(max(1, config.get("delayBetweenPlaylistsSeconds", 2)))

logger.info("Playlist Watch Manager: Finished checking all watched playlists.")


def check_watched_artists(specific_artist_id: str = None):
"""Checks watched artists for new albums and queues downloads."""
"""Checks watched artists for new albums and queues downloads. Processes one page per run to avoid rate limits."""
logger.info(
f"Artist Watch Manager: Starting check. Specific artist: {specific_artist_id or 'All'}"
)
@@ -675,6 +691,11 @@ def check_watched_artists(specific_artist_id: str = None):
logger.info(
f"Artist Watch Manager: Watching for album groups: {watched_album_groups}"
)
# Determine pagination limit for artist albums (Spotify max 50)
try:
artist_batch_limit = max(1, min(int(config.get("maxItemsPerRun", 50)), 50))
except Exception:
artist_batch_limit = 50
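The `max(1, min(..., 50))` expression clamps the configured `maxItemsPerRun` into Spotify's 1-50 page-size range and falls back to 50 on any parse error. The same guard as a reusable helper, for illustration:

```python
def clamp_page_size(raw_value, lo: int = 1, hi: int = 50, default: int = 50) -> int:
    """Coerce a config value to an int and clamp it into [lo, hi]."""
    try:
        return max(lo, min(int(raw_value), hi))
    except (TypeError, ValueError):
        return default

assert clamp_page_size("25") == 25
assert clamp_page_size(0) == 1        # below range -> lo
assert clamp_page_size(999) == 50     # above range -> hi
assert clamp_page_size("oops") == 50  # unparseable -> default
```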

if specific_artist_id:
artist_obj_in_db = get_watched_artist(specific_artist_id)
@@ -694,21 +715,22 @@ def check_watched_artists(specific_artist_id: str = None):
for artist_in_db in artists_to_check:
artist_spotify_id = artist_in_db["spotify_id"]
artist_name = artist_in_db["name"]
artist_lock = _get_artist_lock(artist_spotify_id)
logger.debug(
f"Artist Watch Manager: Waiting for lock on artist {artist_spotify_id}..."
)
with artist_lock:
logger.debug(
f"Artist Watch Manager: Acquired lock for artist {artist_spotify_id}."
)
logger.info(
f"Artist Watch Manager: Checking artist '{artist_name}' ({artist_spotify_id})..."
)

try:
# Use the optimized artist discography function with pagination
all_artist_albums_from_api: List[Dict[str, Any]] = []
offset = 0
limit = 50 # Spotify API limit for artist albums

logger.info(
f"Artist Watch Manager: Fetching albums for artist '{artist_name}' ({artist_spotify_id})"
)

while True:
# One page per run
offset = get_artist_batch_next_offset(artist_spotify_id)
limit = artist_batch_limit
logger.debug(
f"Artist Watch Manager: Fetching albums for {artist_spotify_id}. Limit: {limit}, Offset: {offset}"
)
@@ -716,160 +738,79 @@ def check_watched_artists(specific_artist_id: str = None):
artist_spotify_id, "artist_discography", limit=limit, offset=offset
)

if not artist_albums_page or not isinstance(
artist_albums_page.get("items"), list
):
logger.warning(
f"Artist Watch Manager: No album items found or invalid format for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Response: {artist_albums_page}"
current_page_albums = (
artist_albums_page.get("items", [])
if artist_albums_page
and isinstance(artist_albums_page.get("items"), list)
else []
)
break

current_page_albums = artist_albums_page.get("items", [])
if not current_page_albums:
logger.info(
f"Artist Watch Manager: No more albums on page for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Total fetched so far: {len(all_artist_albums_from_api)}."
)
break

logger.debug(
f"Artist Watch Manager: Fetched {len(current_page_albums)} albums on current page for artist '{artist_name}'."
)
all_artist_albums_from_api.extend(current_page_albums)

# Correct pagination: Check if Spotify indicates a next page URL
# The `next` field in Spotify API responses is a URL to the next page or null.
if artist_albums_page.get("next"):
offset += limit # CORRECT: Increment offset by the limit used for the request
else:
logger.info(
f"Artist Watch Manager: No 'next' page URL for artist '{artist_name}'. Pagination complete. Total albums fetched: {len(all_artist_albums_from_api)}."
)
break

# total_albums_from_api = len(all_artist_albums_from_api)
# Use the 'total' field from the API response for a more accurate count of all available albums (matching current API filter if any)
api_reported_total_albums = (
artist_albums_page.get("total", 0)
if "artist_albums_page" in locals() and artist_albums_page
else len(all_artist_albums_from_api)
)
logger.info(
f"Artist Watch Manager: Fetched {len(all_artist_albums_from_api)} albums in total from API for artist '{artist_name}'. API reports total: {api_reported_total_albums}."
artist_albums_page.get("total", 0) if artist_albums_page else 0
)

db_album_ids = get_artist_album_ids_from_db(artist_spotify_id)
logger.info(
f"Artist Watch Manager: Found {len(db_album_ids)} albums in DB for artist '{artist_name}'. These will be skipped if re-encountered unless logic changes."
)

queued_for_download_count = 0
processed_album_ids_in_run = set() # To avoid processing duplicate album_ids if API returns them across pages (should not happen with correct pagination)
processed_album_ids_in_run = set()

for album_data in all_artist_albums_from_api:
for album_data in current_page_albums:
album_id = album_data.get("id")
album_name = album_data.get("name", "Unknown Album")
album_group = album_data.get("album_group", "N/A").lower()
album_type = album_data.get("album_type", "N/A").lower()

if not album_id:
logger.warning(
f"Artist Watch Manager: Skipping album without ID for artist '{artist_name}'. Album data: {album_data}"
)
continue

if album_id in processed_album_ids_in_run:
logger.debug(
f"Artist Watch Manager: Album '{album_name}' ({album_id}) already processed in this run. Skipping."
)
continue
processed_album_ids_in_run.add(album_id)

# Filter based on watchedArtistAlbumGroup
# The album_group field is generally preferred for this type of categorization as per Spotify docs.
is_matching_group = album_group in watched_album_groups

logger.debug(
f"Artist '{artist_name}', Album '{album_name}' ({album_id}): album_group='{album_group}', album_type='{album_type}'. Watched groups: {watched_album_groups}. Match: {is_matching_group}."
)

if not is_matching_group:
logger.debug(
f"Artist Watch Manager: Skipping album '{album_name}' ({album_id}) by '{artist_name}' - group '{album_group}' not in watched list: {watched_album_groups}."
)
album_group = album_data.get("album_group", "N/A").lower()
if album_group not in watched_album_groups:
continue

logger.info(
f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' (group: {album_group}) IS a matching group."
)
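The rewritten filter keeps only albums whose `album_group` is in the configured watch list (Spotify distinguishes `album_group`, the artist's relationship to the release, including `appears_on`, from `album_type`). A small sketch of the same filter in isolation:

```python
def filter_watched_albums(albums: list[dict], watched_groups: set[str]) -> list[dict]:
    """Keep albums whose album_group matches the configured watch list."""
    kept = []
    for album in albums:
        group = (album.get("album_group") or "N/A").lower()
        if group in watched_groups:
            kept.append(album)
    return kept

# e.g. watched_groups = {"album", "single"} drops "compilation" and "appears_on"
```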

if album_id not in db_album_ids:
logger.info(
f"Artist Watch Manager: Found NEW matching album '{album_name}' ({album_id}) by '{artist_name}'. Queuing for download."
)

album_name = album_data.get("name", "Unknown Album")
album_artists_list = album_data.get("artists", [])
album_main_artist_name = (
album_artists_list[0].get("name", "Unknown Artist")
if album_artists_list
else "Unknown Artist"
)

task_payload = {
"download_type": "album", # Or "track" if downloading individual tracks of album later
"download_type": "album",
"url": construct_spotify_url(album_id, "album"),
"name": album_name,
"artist": album_main_artist_name, # Primary artist of the album
"artist": album_main_artist_name,
"orig_request": {
"source": "artist_watch",
"artist_spotify_id": artist_spotify_id, # Watched artist
"artist_spotify_id": artist_spotify_id,
"artist_name": artist_name,
"album_spotify_id": album_id,
"album_data_for_db": album_data, # Pass full API album object for DB update on completion/queuing
"album_data_for_db": album_data,
},
}
try:
# Add to DB first with task_id, then queue. Or queue and add task_id to DB.
# Let's use add_or_update_album_for_artist to record it with a task_id before queuing.
# The celery_queue_manager.add_task might return None if it's a duplicate.

# Record the album in DB as being processed for download
# Task_id will be added if successfully queued

# We should call add_task first, and if it returns a task_id (not a duplicate), then update our DB.
task_id_or_none = download_queue_manager.add_task(
task_payload, from_watch_job=True
)

if task_id_or_none: # Task was newly queued
# REMOVED: add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=task_id_or_none, is_download_complete=False)
# The album will be added/updated in the DB by celery_tasks.py upon successful download completion.
logger.info(
f"Artist Watch Manager: Queued download task {task_id_or_none} for new album '{album_name}' from artist '{artist_name}'. DB entry will be created/updated on success."
)
if task_id_or_none:
queued_for_download_count += 1
# If task_id_or_none is None, it was a duplicate. Celery manager handles logging.

except Exception as e:
logger.error(
f"Artist Watch Manager: Failed to queue download for new album {album_id} ('{album_name}') from artist '{artist_name}': {e}",
exc_info=True,
)

# Advance offset or finalize
if artist_albums_page and artist_albums_page.get("next"):
next_offset = offset + len(current_page_albums)
set_artist_batch_next_offset(artist_spotify_id, next_offset)
logger.info(
f"Artist Watch Manager: Processed page size {len(current_page_albums)} at offset {offset}. Next offset {next_offset}."
)
else:
logger.info(
f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' already known in DB (ID found in db_album_ids). Skipping queue."
)
# Optionally, update its entry (e.g. last_seen, or if details changed), but for now, we only queue new ones.
# add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=None, is_download_complete=False) # would update added_to_db_at

logger.info(
f"Artist Watch Manager: For artist '{artist_name}', processed {len(all_artist_albums_from_api)} API albums, attempted to queue {queued_for_download_count} new albums."
)

set_artist_batch_next_offset(artist_spotify_id, 0)
update_artist_metadata_after_check(
artist_spotify_id, api_reported_total_albums
)
logger.info(
f"Artist Watch Manager: Finished checking artist '{artist_name}'. DB metadata updated. API reported total albums (for API filter): {api_reported_total_albums}."
f"Artist Watch Manager: Completed discography scan for '{artist_name}'. Metadata updated."
)

except Exception as e:
@@ -878,14 +819,17 @@ def check_watched_artists(specific_artist_id: str = None):
exc_info=True,
)

# Only sleep between items when running a batch (no specific ID)
if not specific_artist_id:
time.sleep(max(1, config.get("delayBetweenArtistsSeconds", 5)))

logger.info("Artist Watch Manager: Finished checking all watched artists.")


def playlist_watch_scheduler():
"""Periodically calls check_watched_playlists and check_watched_artists."""
"""Periodically checks one watched item (playlist or artist) per interval in round-robin order."""
logger.info("Watch Scheduler: Thread started.")
global _round_robin_index

while not STOP_EVENT.is_set():
current_config = get_watch_config() # Get latest config for this run
@@ -901,38 +845,105 @@ def playlist_watch_scheduler():
) # Still respect poll interval for checking config again
continue # Skip to next iteration

# Build the current list of items to watch (playlists and artists)
try:
logger.info("Watch Scheduler: Starting playlist check run.")
check_watched_playlists()
logger.info("Watch Scheduler: Playlist check run completed.")
playlists_list = get_watched_playlists() or []
recorded_playlists = [("playlist", p["spotify_id"]) for p in playlists_list]
artists_list = get_watched_artists() or []
recorded_artists = [("artist", a["spotify_id"]) for a in artists_list]
all_items = recorded_playlists + recorded_artists
except Exception as e:
logger.error(
f"Watch Scheduler: Unhandled exception during check_watched_playlists: {e}",
exc_info=True,
f"Watch Scheduler: Failed to build items list: {e}", exc_info=True
)
all_items = []

# Add a small delay between playlist and artist checks if desired
# time.sleep(current_config.get("delay_between_check_types_seconds", 10))
if STOP_EVENT.is_set():
break # Check stop event again before starting artist check
if not all_items:
logger.info(
"Watch Scheduler: No watched playlists or artists. Waiting for next interval."
)
STOP_EVENT.wait(interval)
continue

# Pick the next item in round-robin order
index = _round_robin_index % len(all_items)
item_type, item_id = all_items[index]
_round_robin_index += 1
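The scheduler keeps a single global index and takes it modulo the current item count, so playlists and artists are visited one per interval in a stable rotation even as items are added or removed between runs. The selection step in isolation:

```python
_round_robin_index = 0

def pick_next(items: list[tuple[str, str]]) -> tuple[str, str]:
    """Return the next (item_type, item_id) pair in round-robin order."""
    global _round_robin_index
    # Modulo keeps the index valid even if the list changed size since last run.
    index = _round_robin_index % len(items)
    _round_robin_index += 1
    return items[index]

watched = [("playlist", "pl1"), ("artist", "ar1"), ("playlist", "pl2")]
picks = [pick_next(watched) for _ in range(4)]
assert picks[3] == ("playlist", "pl1")  # wraps around after the last item
```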

try:
logger.info("Watch Scheduler: Starting artist check run.")
check_watched_artists()
logger.info("Watch Scheduler: Artist check run completed.")
if item_type == "playlist":
logger.info(
f"Watch Scheduler: Checking next playlist {item_id} (index {index})."
)
check_watched_playlists(specific_playlist_id=item_id)
elif item_type == "artist":
logger.info(
f"Watch Scheduler: Checking next artist {item_id} (index {index})."
)
check_watched_artists(specific_artist_id=item_id)
else:
logger.warning(
f"Watch Scheduler: Unknown item type '{item_type}' for id '{item_id}'. Skipping."
)
except Exception as e:
logger.error(
f"Watch Scheduler: Unhandled exception during check_watched_artists: {e}",
f"Watch Scheduler: Unhandled exception during item check ({item_type}:{item_id}): {e}",
exc_info=True,
)

logger.info(
f"Watch Scheduler: All checks complete. Next run in {interval} seconds."
f"Watch Scheduler: One-item check complete. Next run in {interval} seconds."
)
STOP_EVENT.wait(interval)
logger.info("Watch Scheduler: Thread stopped.")


def run_playlist_check_over_intervals(playlist_spotify_id: str) -> None:
"""Run checks for a specific playlist over repeated intervals until sync completes.
Spreads batches across watchPollInterval to avoid rate limits.
"""
logger.info(
f"Manual Playlist Runner: Starting interval-based sync for playlist {playlist_spotify_id}."
)
while not STOP_EVENT.is_set():
try:
check_watched_playlists(specific_playlist_id=playlist_spotify_id)
# Determine if we are done: no active processing snapshot and no pending sync
cfg = get_watch_config()
interval = cfg.get("watchPollIntervalSeconds", 3600)
metadata = get_playlist_metadata(playlist_spotify_id)
if not metadata:
logger.warning(
f"Manual Playlist Runner: Could not load metadata for {playlist_spotify_id}. Stopping."
)
break
api_snapshot_id = metadata.get("snapshot_id")
total = metadata.get("tracks", {}).get("total", 0)
progress_offset, processing_snapshot = get_playlist_batch_progress(
playlist_spotify_id
)
needs_sync, _ = needs_track_sync(
playlist_spotify_id, api_snapshot_id, total
)
if processing_snapshot is None and not needs_sync:
logger.info(
f"Manual Playlist Runner: Sync complete for playlist {playlist_spotify_id}."
)
break
logger.info(
f"Manual Playlist Runner: Waiting {interval}s before next batch for playlist {playlist_spotify_id}."
)
if STOP_EVENT.wait(interval):
break
except Exception as e:
logger.error(
f"Manual Playlist Runner: Error during interval sync for {playlist_spotify_id}: {e}",
exc_info=True,
)
break
logger.info(f"Manual Playlist Runner: Finished for playlist {playlist_spotify_id}.")


# --- Global thread for the scheduler ---
_watch_scheduler_thread = None # Renamed from _playlist_watch_thread


@@ -1,7 +1,7 @@
{
"name": "spotizerr-ui",
"private": true,
"version": "3.2.0",
"version": "3.2.1",
"type": "module",
"scripts": {
"dev": "vite",

@@ -85,6 +85,7 @@ export function AccountsTab() {
onSuccess: () => {
toast.success("Account added successfully!");
queryClient.invalidateQueries({ queryKey: ["credentials", activeService] });
queryClient.invalidateQueries({ queryKey: ["config"] }); // Invalidate config to update active Spotify account in UI
setIsAdding(false);
setSubmitError(null);
reset();

@@ -13,6 +13,7 @@ interface WatchSettings {
enabled: boolean;
watchPollIntervalSeconds: number;
watchedArtistAlbumGroup: AlbumGroup[];
maxItemsPerRun: number;
}

interface DownloadSettings {
@@ -92,8 +93,9 @@ export function WatchTab() {
setTimeout(() => setSaveStatus("idle"), 3000);
queryClient.invalidateQueries({ queryKey: ["watchConfig"] });
},
onError: (error) => {
toast.error(`Failed to save settings: ${error.message}`);
onError: (error: any) => {
const message = error?.response?.data?.error || error?.message || "Unknown error";
toast.error(`Failed to save settings: ${message}`);
setSaveStatus("error");
setTimeout(() => setSaveStatus("idle"), 3000);
},
@@ -108,6 +110,7 @@ export function WatchTab() {
}, [config, reset]);

const watchEnabled = watch("enabled");
const maxItemsPerRunValue = watch("maxItemsPerRun");

// Validation effect for watch + download method requirement
useEffect(() => {
@@ -126,8 +129,14 @@ export function WatchTab() {
error = `Watch with Fallback requires accounts for both services. Missing: ${missingServices.join(", ")}. Configure accounts in the Accounts tab.`;
}

// Validate maxItemsPerRun range (1..50)
const mir = Number(maxItemsPerRunValue);
if (!error && (Number.isNaN(mir) || mir < 1 || mir > 50)) {
error = "Max items per run must be between 1 and 50.";
}

setValidationError(error);
}, [watchEnabled, downloadConfig?.realTime, downloadConfig?.fallback, spotifyCredentials?.length, deezerCredentials?.length]);
}, [watchEnabled, downloadConfig?.realTime, downloadConfig?.fallback, spotifyCredentials?.length, deezerCredentials?.length, maxItemsPerRunValue]);

const onSubmit: SubmitHandler<WatchSettings> = (data) => {
// Check validation before submitting
@@ -148,9 +157,18 @@ export function WatchTab() {
return;
}

// Validate maxItemsPerRun in handler too, to be safe
const mir = Number(data.maxItemsPerRun);
if (Number.isNaN(mir) || mir < 1 || mir > 50) {
setValidationError("Max items per run must be between 1 and 50.");
toast.error("Validation failed: Max items per run must be between 1 and 50.");
return;
}

mutation.mutate({
...data,
watchPollIntervalSeconds: Number(data.watchPollIntervalSeconds),
maxItemsPerRun: Number(data.maxItemsPerRun),
});
};

@@ -225,7 +243,20 @@ export function WatchTab() {
{...register("watchPollIntervalSeconds")}
className="block w-full p-2 border bg-input-background dark:bg-input-background-dark border-input-border dark:border-input-border-dark rounded-md focus:outline-none focus:ring-2 focus:ring-input-focus"
/>
<p className="text-sm text-content-muted dark:text-content-muted-dark mt-1">How often to check watched items for updates.</p>
<p className="text-sm text-content-muted dark:text-content-muted-dark mt-1">How often to check for new items in watchlist.</p>
</div>

<div className="flex flex-col gap-2">
<label htmlFor="maxItemsPerRun" className="text-content-primary dark:text-content-primary-dark">Max Items Per Run</label>
<input
id="maxItemsPerRun"
type="number"
min="1"
max="50"
{...register("maxItemsPerRun")}
className="block w-full p-2 border bg-input-background dark:bg-input-background-dark border-input-border dark:border-input-border-dark rounded-md focus:outline-none focus:ring-2 focus:ring-input-focus"
/>
<p className="text-sm text-content-muted dark:text-content-muted-dark mt-1">Batch size per watch cycle (1–50).</p>
</div>
</div>


@@ -102,13 +102,16 @@ const defaultSettings: FlatAppSettings = {
spotifyMetadata: true,
watch: {
enabled: false,
maxItemsPerRun: 50,
watchPollIntervalSeconds: 3600,
watchedArtistAlbumGroup: ["album", "single"],
},
realTimeMultiplier: 0,
};

interface FetchedCamelCaseSettings {
watchEnabled?: boolean;
watch?: { enabled: boolean };
watch?: { enabled: boolean; maxItemsPerRun?: number; watchPollIntervalSeconds?: number; watchedArtistAlbumGroup?: string[] };
[key: string]: unknown;
}

@@ -132,6 +135,14 @@ const fetchSettings = async (): Promise<FlatAppSettings> => {
// Ensure required frontend-only fields exist
recursiveQuality: Boolean((camelData as any).recursiveQuality ?? false),
realTimeMultiplier: Number((camelData as any).realTimeMultiplier ?? 0),
// Ensure watch subkeys default if missing
watch: {
...(camelData.watch as any),
enabled: Boolean((camelData.watch as any)?.enabled ?? false),
maxItemsPerRun: Number((camelData.watch as any)?.maxItemsPerRun ?? 50),
watchPollIntervalSeconds: Number((camelData.watch as any)?.watchPollIntervalSeconds ?? 3600),
watchedArtistAlbumGroup: (camelData.watch as any)?.watchedArtistAlbumGroup ?? ["album", "single"],
},
};

return withDefaults;

@@ -37,7 +37,9 @@ export interface AppSettings {
spotifyMetadata: boolean;
watch: {
enabled: boolean;
// Add other watch properties from the old type if they still exist in the API response
maxItemsPerRun: number;
watchPollIntervalSeconds: number;
watchedArtistAlbumGroup: string[];
};
// Add other root-level properties from the API if they exist
realTimeMultiplier: number;

@@ -246,9 +246,10 @@ export const Artist = () => {
return <div>Artist data could not be fully loaded. Please try again later.</div>;
}

const artistAlbums = applyFilters(albums.filter((album) => album.album_type === "album"));
const artistSingles = applyFilters(albums.filter((album) => album.album_type === "single"));
const artistCompilations = applyFilters(albums.filter((album) => album.album_type === "compilation"));
const artistAlbums = applyFilters(albums.filter((album) => (album.album_group ?? album.album_type) === "album"));
const artistSingles = applyFilters(albums.filter((album) => (album.album_group ?? album.album_type) === "single"));
const artistCompilations = applyFilters(albums.filter((album) => (album.album_group ?? album.album_type) === "compilation"));
const artistAppearsOn = applyFilters(albums.filter((album) => (album.album_group ?? "") === "appears_on"));

return (
<div className="artist-page">
@@ -364,6 +365,18 @@ export const Artist = () => {
</div>
)}

{/* Appears On */}
{artistAppearsOn.length > 0 && (
<div className="mb-12">
<h2 className="text-3xl font-bold mb-6 text-content-primary dark:text-content-primary-dark">Appears On</h2>
<div className="grid grid-cols-2 md:grid-cols-3 lg:grid-cols-4 xl:grid-cols-5 gap-6">
{artistAppearsOn.map((album) => (
<AlbumCard key={album.id} album={album} onDownload={() => handleDownloadAlbum(album)} />
))}
</div>
</div>
)}

{/* sentinel + loading */}
<div className="flex flex-col items-center gap-2">
{loadingMore && <div className="py-4">Loading more...</div>}

@@ -44,6 +44,7 @@ export interface AlbumType {
id: string;
name: string;
album_type: "album" | "single" | "compilation";
album_group?: "album" | "single" | "compilation" | "appears_on";
artists: ArtistType[];
images: ImageType[];
release_date: string;