2.0 is coming
This commit is contained in:
@@ -94,16 +94,20 @@ class CeleryDownloadQueueManager:
|
||||
self.paused = False
|
||||
print(f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}")
|
||||
|
||||
def add_task(self, task):
|
||||
def add_task(self, task: dict, from_watch_job: bool = False):
|
||||
"""
|
||||
Add a new download task to the Celery queue.
|
||||
If a duplicate active task is found, a new task ID is created and immediately set to an ERROR state.
|
||||
- If from_watch_job is True and an active duplicate is found, the task is not queued and None is returned.
|
||||
- If from_watch_job is False and an active duplicate is found, a new task ID is created,
|
||||
set to an ERROR state indicating the duplicate, and this new error task's ID is returned.
|
||||
|
||||
Args:
|
||||
task (dict): Task parameters including download_type, url, etc.
|
||||
from_watch_job (bool): If True, duplicate active tasks are skipped. Defaults to False.
|
||||
|
||||
Returns:
|
||||
str: Task ID (either for a new task or for a new error-state task if duplicate detected).
|
||||
str | None: Task ID if successfully queued or an error task ID for non-watch duplicates.
|
||||
None if from_watch_job is True and an active duplicate was found.
|
||||
"""
|
||||
try:
|
||||
# Extract essential parameters for duplicate check
|
||||
@@ -111,20 +115,18 @@ class CeleryDownloadQueueManager:
|
||||
incoming_type = task.get("download_type", "unknown")
|
||||
|
||||
if not incoming_url:
|
||||
# This should ideally be validated before calling add_task
|
||||
# For now, let it proceed and potentially fail in Celery task if URL is vital and missing.
|
||||
# Or, create an error task immediately if URL is strictly required for any task logging.
|
||||
logger.warning("Task being added with no URL. Duplicate check might be unreliable.")
|
||||
|
||||
# --- Check for Duplicates ---
|
||||
NON_BLOCKING_STATES = [
|
||||
ProgressState.COMPLETE,
|
||||
ProgressState.CANCELLED,
|
||||
ProgressState.ERROR
|
||||
ProgressState.ERROR,
|
||||
ProgressState.ERROR_RETRIED,
|
||||
ProgressState.ERROR_AUTO_CLEANED
|
||||
]
|
||||
|
||||
all_existing_tasks_summary = get_all_tasks()
|
||||
if incoming_url: # Only check for duplicates if we have a URL
|
||||
all_existing_tasks_summary = get_all_tasks()
|
||||
if incoming_url:
|
||||
for task_summary in all_existing_tasks_summary:
|
||||
existing_task_id = task_summary.get("task_id")
|
||||
if not existing_task_id:
|
||||
@@ -147,97 +149,65 @@ class CeleryDownloadQueueManager:
|
||||
message = f"Duplicate download: URL '{incoming_url}' (type: {incoming_type}) is already being processed by task {existing_task_id} (status: {existing_status})."
|
||||
logger.warning(message)
|
||||
|
||||
# Create a new task_id for this duplicate request and mark it as an error
|
||||
error_task_id = str(uuid.uuid4())
|
||||
|
||||
# Store minimal info for this error task
|
||||
error_task_info_payload = {
|
||||
"download_type": incoming_type,
|
||||
"type": task.get("type", incoming_type),
|
||||
"name": task.get("name", "Duplicate Task"),
|
||||
"artist": task.get("artist", ""),
|
||||
"url": incoming_url,
|
||||
"original_request": task.get("orig_request", task.get("original_request", {})),
|
||||
"created_at": time.time(),
|
||||
"is_duplicate_error_task": True
|
||||
}
|
||||
store_task_info(error_task_id, error_task_info_payload)
|
||||
|
||||
# Store error status for this new task_id
|
||||
error_status_payload = {
|
||||
"status": ProgressState.ERROR,
|
||||
"error": message,
|
||||
"existing_task_id": existing_task_id, # So client knows which task it duplicates
|
||||
"timestamp": time.time(),
|
||||
"type": error_task_info_payload["type"],
|
||||
"name": error_task_info_payload["name"],
|
||||
"artist": error_task_info_payload["artist"]
|
||||
}
|
||||
store_task_status(error_task_id, error_status_payload)
|
||||
|
||||
return error_task_id # Return the ID of this new error-state task
|
||||
# --- End Duplicate Check ---
|
||||
if from_watch_job:
|
||||
logger.info(f"Task from watch job for {incoming_url} not queued due to active duplicate {existing_task_id}.")
|
||||
return None # Skip execution for watch jobs
|
||||
else:
|
||||
# Create a new task_id for this duplicate request and mark it as an error
|
||||
error_task_id = str(uuid.uuid4())
|
||||
error_task_info_payload = {
|
||||
"download_type": incoming_type,
|
||||
"type": task.get("type", incoming_type),
|
||||
"name": task.get("name", "Duplicate Task"),
|
||||
"artist": task.get("artist", ""),
|
||||
"url": incoming_url,
|
||||
"original_request": task.get("orig_request", task.get("original_request", {})),
|
||||
"created_at": time.time(),
|
||||
"is_duplicate_error_task": True
|
||||
}
|
||||
store_task_info(error_task_id, error_task_info_payload)
|
||||
error_status_payload = {
|
||||
"status": ProgressState.ERROR,
|
||||
"error": message,
|
||||
"existing_task_id": existing_task_id,
|
||||
"timestamp": time.time(),
|
||||
"type": error_task_info_payload["type"],
|
||||
"name": error_task_info_payload["name"],
|
||||
"artist": error_task_info_payload["artist"]
|
||||
}
|
||||
store_task_status(error_task_id, error_status_payload)
|
||||
return error_task_id # Return the ID of this new error-state task
|
||||
|
||||
# Proceed with normal task creation if no duplicate found or no URL to check
|
||||
download_type = task.get("download_type", "unknown")
|
||||
|
||||
# Debug existing task data
|
||||
logger.debug(f"Adding {download_type} task with data: {json.dumps({k: v for k, v in task.items() if k != 'orig_request'})}")
|
||||
|
||||
# Create a unique task ID
|
||||
task_id = str(uuid.uuid4())
|
||||
|
||||
# Get config parameters and process original request
|
||||
config_params = get_config_params()
|
||||
|
||||
# Extract original request or use empty dict
|
||||
original_request = task.get("orig_request", task.get("original_request", {}))
|
||||
|
||||
# Debug retry_url if present
|
||||
if "retry_url" in task:
|
||||
logger.debug(f"Task has retry_url: {task['retry_url']}")
|
||||
|
||||
# Build the complete task with config parameters
|
||||
complete_task = {
|
||||
"download_type": download_type,
|
||||
"type": task.get("type", download_type),
|
||||
"download_type": incoming_type,
|
||||
"type": task.get("type", incoming_type),
|
||||
"name": task.get("name", ""),
|
||||
"artist": task.get("artist", ""),
|
||||
"url": task.get("url", ""),
|
||||
|
||||
# Preserve retry_url if present
|
||||
"retry_url": task.get("retry_url", ""),
|
||||
|
||||
# Use main account from config
|
||||
"main": original_request.get("main", config_params['deezer']),
|
||||
|
||||
# Set fallback if enabled in config
|
||||
"fallback": original_request.get("fallback",
|
||||
config_params['spotify'] if config_params['fallback'] else None),
|
||||
|
||||
# Use default quality settings
|
||||
"quality": original_request.get("quality", config_params['deezerQuality']),
|
||||
|
||||
"fall_quality": original_request.get("fall_quality", config_params['spotifyQuality']),
|
||||
|
||||
# Parse boolean parameters from string values
|
||||
"real_time": self._parse_bool_param(original_request.get("real_time"), config_params['realTime']),
|
||||
|
||||
"custom_dir_format": original_request.get("custom_dir_format", config_params['customDirFormat']),
|
||||
"custom_track_format": original_request.get("custom_track_format", config_params['customTrackFormat']),
|
||||
|
||||
# Parse boolean parameters from string values
|
||||
"pad_tracks": self._parse_bool_param(original_request.get("tracknum_padding"), config_params['tracknum_padding']),
|
||||
|
||||
"retry_count": 0,
|
||||
"original_request": original_request,
|
||||
"created_at": time.time()
|
||||
}
|
||||
|
||||
# If from_watch_job is True, ensure track_details_for_db is passed through
|
||||
if from_watch_job and "track_details_for_db" in task:
|
||||
complete_task["track_details_for_db"] = task["track_details_for_db"]
|
||||
|
||||
# Store the task info in Redis for later retrieval
|
||||
store_task_info(task_id, complete_task)
|
||||
|
||||
# Store initial queued status
|
||||
store_task_status(task_id, {
|
||||
"status": ProgressState.QUEUED,
|
||||
"timestamp": time.time(),
|
||||
@@ -245,46 +215,35 @@ class CeleryDownloadQueueManager:
|
||||
"name": complete_task["name"],
|
||||
"artist": complete_task["artist"],
|
||||
"retry_count": 0,
|
||||
"queue_position": len(get_all_tasks()) + 1 # Approximate queue position
|
||||
"queue_position": len(get_all_tasks()) + 1
|
||||
})
|
||||
|
||||
# Launch the appropriate Celery task based on download_type
|
||||
celery_task = None
|
||||
celery_task_map = {
|
||||
"track": download_track,
|
||||
"album": download_album,
|
||||
"playlist": download_playlist
|
||||
}
|
||||
|
||||
if download_type == "track":
|
||||
celery_task = download_track.apply_async(
|
||||
kwargs=complete_task,
|
||||
task_id=task_id,
|
||||
countdown=0 if not self.paused else 3600 # Delay task if paused
|
||||
)
|
||||
elif download_type == "album":
|
||||
celery_task = download_album.apply_async(
|
||||
kwargs=complete_task,
|
||||
task_id=task_id,
|
||||
countdown=0 if not self.paused else 3600
|
||||
)
|
||||
elif download_type == "playlist":
|
||||
celery_task = download_playlist.apply_async(
|
||||
task_func = celery_task_map.get(incoming_type)
|
||||
if task_func:
|
||||
task_func.apply_async(
|
||||
kwargs=complete_task,
|
||||
task_id=task_id,
|
||||
countdown=0 if not self.paused else 3600
|
||||
)
|
||||
logger.info(f"Added {incoming_type} download task {task_id} to Celery queue.")
|
||||
return task_id
|
||||
else:
|
||||
# Store error status for unknown download type
|
||||
store_task_status(task_id, {
|
||||
"status": ProgressState.ERROR,
|
||||
"message": f"Unsupported download type: {download_type}",
|
||||
"message": f"Unsupported download type: {incoming_type}",
|
||||
"timestamp": time.time()
|
||||
})
|
||||
logger.error(f"Unsupported download type: {download_type}")
|
||||
return task_id # Still return the task_id so the error can be tracked
|
||||
|
||||
logger.info(f"Added {download_type} download task {task_id} to Celery queue")
|
||||
return task_id
|
||||
logger.error(f"Unsupported download type: {incoming_type}")
|
||||
return task_id
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error adding task to Celery queue: {e}", exc_info=True)
|
||||
# Generate a task ID even for failed tasks so we can track the error
|
||||
error_task_id = str(uuid.uuid4())
|
||||
store_task_status(error_task_id, {
|
||||
"status": ProgressState.ERROR,
|
||||
|
||||
@@ -15,6 +15,8 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
# Setup Redis and Celery
|
||||
from routes.utils.celery_config import REDIS_URL, REDIS_BACKEND, REDIS_PASSWORD, get_config_params
|
||||
# Import for playlist watch DB update
|
||||
from routes.utils.watch.db import add_single_track_to_playlist_db
|
||||
|
||||
# Initialize Celery app
|
||||
celery_app = Celery('download_tasks',
|
||||
@@ -826,6 +828,22 @@ def task_postrun_handler(task_id=None, task=None, retval=None, state=None, *args
|
||||
"message": "Download completed successfully."
|
||||
})
|
||||
logger.info(f"Task {task_id} completed successfully: {task_info.get('name', 'Unknown')}")
|
||||
|
||||
# If from playlist_watch and successful, add track to DB
|
||||
original_request = task_info.get("original_request", {})
|
||||
if original_request.get("source") == "playlist_watch":
|
||||
playlist_id = original_request.get("playlist_id")
|
||||
track_item_for_db = original_request.get("track_item_for_db")
|
||||
|
||||
if playlist_id and track_item_for_db and track_item_for_db.get('track'):
|
||||
logger.info(f"Task {task_id} was from playlist watch for playlist {playlist_id}. Adding track to DB.")
|
||||
try:
|
||||
add_single_track_to_playlist_db(playlist_id, track_item_for_db)
|
||||
except Exception as db_add_err:
|
||||
logger.error(f"Failed to add track to DB for playlist {playlist_id} after successful download task {task_id}: {db_add_err}", exc_info=True)
|
||||
else:
|
||||
logger.warning(f"Task {task_id} was from playlist_watch but missing playlist_id or track_item_for_db for DB update. Original Request: {original_request}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in task_postrun_handler: {e}")
|
||||
|
||||
|
||||
@@ -7,13 +7,15 @@ from routes.utils.celery_queue_manager import get_config_params
|
||||
|
||||
# We'll rely on get_config_params() instead of directly loading the config file
|
||||
|
||||
def get_spotify_info(spotify_id, spotify_type):
|
||||
def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None):
|
||||
"""
|
||||
Get info from Spotify API using the default Spotify account configured in main.json
|
||||
|
||||
Args:
|
||||
spotify_id: The Spotify ID of the entity
|
||||
spotify_type: The type of entity (track, album, playlist, artist)
|
||||
limit (int, optional): The maximum number of items to return. Only used if spotify_type is "artist".
|
||||
offset (int, optional): The index of the first item to return. Only used if spotify_type is "artist".
|
||||
|
||||
Returns:
|
||||
Dictionary with the entity information
|
||||
@@ -51,7 +53,14 @@ def get_spotify_info(spotify_id, spotify_type):
|
||||
elif spotify_type == "playlist":
|
||||
return Spo.get_playlist(spotify_id)
|
||||
elif spotify_type == "artist":
|
||||
return Spo.get_artist(spotify_id)
|
||||
if limit is not None and offset is not None:
|
||||
return Spo.get_artist(spotify_id, limit=limit, offset=offset)
|
||||
elif limit is not None:
|
||||
return Spo.get_artist(spotify_id, limit=limit)
|
||||
elif offset is not None:
|
||||
return Spo.get_artist(spotify_id, offset=offset)
|
||||
else:
|
||||
return Spo.get_artist(spotify_id)
|
||||
elif spotify_type == "episode":
|
||||
return Spo.get_episode(spotify_id)
|
||||
else:
|
||||
|
||||
703
routes/utils/watch/db.py
Normal file
703
routes/utils/watch/db.py
Normal file
@@ -0,0 +1,703 @@
|
||||
import sqlite3
|
||||
import json
|
||||
from pathlib import Path
|
||||
import logging
|
||||
import time
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DB_DIR = Path('./data/watch')
|
||||
# Define separate DB paths
|
||||
PLAYLISTS_DB_PATH = DB_DIR / 'playlists.db'
|
||||
ARTISTS_DB_PATH = DB_DIR / 'artists.db'
|
||||
|
||||
# Config path remains the same
|
||||
CONFIG_PATH = Path('./data/config/watch.json')
|
||||
|
||||
def _get_playlists_db_connection():
|
||||
DB_DIR.mkdir(parents=True, exist_ok=True)
|
||||
conn = sqlite3.connect(PLAYLISTS_DB_PATH, timeout=10)
|
||||
conn.row_factory = sqlite3.Row
|
||||
return conn
|
||||
|
||||
def _get_artists_db_connection():
|
||||
DB_DIR.mkdir(parents=True, exist_ok=True)
|
||||
conn = sqlite3.connect(ARTISTS_DB_PATH, timeout=10)
|
||||
conn.row_factory = sqlite3.Row
|
||||
return conn
|
||||
|
||||
def init_playlists_db():
|
||||
"""Initializes the playlists database and creates the main watched_playlists table if it doesn't exist."""
|
||||
try:
|
||||
with _get_playlists_db_connection() as conn:
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("""
|
||||
CREATE TABLE IF NOT EXISTS watched_playlists (
|
||||
spotify_id TEXT PRIMARY KEY,
|
||||
name TEXT,
|
||||
owner_id TEXT,
|
||||
owner_name TEXT,
|
||||
total_tracks INTEGER,
|
||||
link TEXT,
|
||||
snapshot_id TEXT,
|
||||
last_checked INTEGER,
|
||||
added_at INTEGER,
|
||||
is_active INTEGER DEFAULT 1
|
||||
)
|
||||
""")
|
||||
conn.commit()
|
||||
logger.info(f"Playlists database initialized successfully at {PLAYLISTS_DB_PATH}")
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error initializing watched_playlists table: {e}", exc_info=True)
|
||||
raise
|
||||
|
||||
def _create_playlist_tracks_table(playlist_spotify_id: str):
|
||||
"""Creates a table for a specific playlist to store its tracks if it doesn't exist in playlists.db."""
|
||||
table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}" # Sanitize table name
|
||||
try:
|
||||
with _get_playlists_db_connection() as conn: # Use playlists connection
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(f"""
|
||||
CREATE TABLE IF NOT EXISTS {table_name} (
|
||||
spotify_track_id TEXT PRIMARY KEY,
|
||||
title TEXT,
|
||||
artist_names TEXT, -- Comma-separated artist names
|
||||
album_name TEXT,
|
||||
album_artist_names TEXT, -- Comma-separated album artist names
|
||||
track_number INTEGER,
|
||||
album_spotify_id TEXT,
|
||||
duration_ms INTEGER,
|
||||
added_at_playlist TEXT, -- When track was added to Spotify playlist
|
||||
added_to_db INTEGER, -- Timestamp when track was added to this DB table
|
||||
is_present_in_spotify INTEGER DEFAULT 1, -- Flag to mark if still in Spotify playlist
|
||||
last_seen_in_spotify INTEGER -- Timestamp when last confirmed in Spotify playlist
|
||||
)
|
||||
""")
|
||||
conn.commit()
|
||||
logger.info(f"Tracks table '{table_name}' created or already exists in {PLAYLISTS_DB_PATH}.")
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error creating playlist tracks table {table_name} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True)
|
||||
raise
|
||||
|
||||
def add_playlist_to_watch(playlist_data: dict):
|
||||
"""Adds a playlist to the watched_playlists table and creates its tracks table in playlists.db."""
|
||||
try:
|
||||
_create_playlist_tracks_table(playlist_data['id'])
|
||||
with _get_playlists_db_connection() as conn: # Use playlists connection
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("""
|
||||
INSERT OR REPLACE INTO watched_playlists
|
||||
(spotify_id, name, owner_id, owner_name, total_tracks, link, snapshot_id, last_checked, added_at, is_active)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 1)
|
||||
""", (
|
||||
playlist_data['id'],
|
||||
playlist_data['name'],
|
||||
playlist_data['owner']['id'],
|
||||
playlist_data['owner'].get('display_name', playlist_data['owner']['id']),
|
||||
playlist_data['tracks']['total'],
|
||||
playlist_data['external_urls']['spotify'],
|
||||
playlist_data.get('snapshot_id'),
|
||||
int(time.time()),
|
||||
int(time.time())
|
||||
))
|
||||
conn.commit()
|
||||
logger.info(f"Playlist '{playlist_data['name']}' ({playlist_data['id']}) added to watchlist in {PLAYLISTS_DB_PATH}.")
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error adding playlist {playlist_data.get('id')} to watchlist in {PLAYLISTS_DB_PATH}: {e}", exc_info=True)
|
||||
raise
|
||||
|
||||
def remove_playlist_from_watch(playlist_spotify_id: str):
|
||||
"""Removes a playlist from watched_playlists and drops its tracks table in playlists.db."""
|
||||
table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
|
||||
try:
|
||||
with _get_playlists_db_connection() as conn: # Use playlists connection
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("DELETE FROM watched_playlists WHERE spotify_id = ?", (playlist_spotify_id,))
|
||||
cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
|
||||
conn.commit()
|
||||
logger.info(f"Playlist {playlist_spotify_id} removed from watchlist and its table '{table_name}' dropped in {PLAYLISTS_DB_PATH}.")
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error removing playlist {playlist_spotify_id} from watchlist in {PLAYLISTS_DB_PATH}: {e}", exc_info=True)
|
||||
raise
|
||||
|
||||
def get_watched_playlists():
|
||||
"""Retrieves all active playlists from the watched_playlists table in playlists.db."""
|
||||
try:
|
||||
with _get_playlists_db_connection() as conn: # Use playlists connection
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("SELECT * FROM watched_playlists WHERE is_active = 1")
|
||||
playlists = [dict(row) for row in cursor.fetchall()]
|
||||
return playlists
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error retrieving watched playlists from {PLAYLISTS_DB_PATH}: {e}", exc_info=True)
|
||||
return []
|
||||
|
||||
def get_watched_playlist(playlist_spotify_id: str):
|
||||
"""Retrieves a specific playlist from the watched_playlists table in playlists.db."""
|
||||
try:
|
||||
with _get_playlists_db_connection() as conn: # Use playlists connection
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("SELECT * FROM watched_playlists WHERE spotify_id = ?", (playlist_spotify_id,))
|
||||
row = cursor.fetchone()
|
||||
return dict(row) if row else None
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error retrieving playlist {playlist_spotify_id} from {PLAYLISTS_DB_PATH}: {e}", exc_info=True)
|
||||
return None
|
||||
|
||||
def update_playlist_snapshot(playlist_spotify_id: str, snapshot_id: str, total_tracks: int):
|
||||
"""Updates the snapshot_id and total_tracks for a watched playlist in playlists.db."""
|
||||
try:
|
||||
with _get_playlists_db_connection() as conn: # Use playlists connection
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("""
|
||||
UPDATE watched_playlists
|
||||
SET snapshot_id = ?, total_tracks = ?, last_checked = ?
|
||||
WHERE spotify_id = ?
|
||||
""", (snapshot_id, total_tracks, int(time.time()), playlist_spotify_id))
|
||||
conn.commit()
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error updating snapshot for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True)
|
||||
|
||||
def get_playlist_track_ids_from_db(playlist_spotify_id: str):
|
||||
"""Retrieves all track Spotify IDs from a specific playlist's tracks table in playlists.db."""
|
||||
table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
|
||||
track_ids = set()
|
||||
try:
|
||||
with _get_playlists_db_connection() as conn: # Use playlists connection
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';")
|
||||
if cursor.fetchone() is None:
|
||||
logger.warning(f"Track table {table_name} does not exist in {PLAYLISTS_DB_PATH}. Cannot fetch track IDs.")
|
||||
return track_ids
|
||||
cursor.execute(f"SELECT spotify_track_id FROM {table_name} WHERE is_present_in_spotify = 1")
|
||||
rows = cursor.fetchall()
|
||||
for row in rows:
|
||||
track_ids.add(row['spotify_track_id'])
|
||||
return track_ids
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error retrieving track IDs for playlist {playlist_spotify_id} from table {table_name} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True)
|
||||
return track_ids
|
||||
|
||||
def add_tracks_to_playlist_db(playlist_spotify_id: str, tracks_data: list):
|
||||
"""Adds or updates a list of tracks in the specified playlist's tracks table in playlists.db."""
|
||||
table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
|
||||
if not tracks_data:
|
||||
return
|
||||
|
||||
current_time = int(time.time())
|
||||
tracks_to_insert = []
|
||||
for track_item in tracks_data:
|
||||
track = track_item.get('track')
|
||||
if not track or not track.get('id'):
|
||||
logger.warning(f"Skipping track due to missing data or ID in playlist {playlist_spotify_id}: {track_item}")
|
||||
continue
|
||||
|
||||
# Ensure 'artists' and 'album' -> 'artists' are lists and extract names
|
||||
artist_names = ", ".join([artist['name'] for artist in track.get('artists', []) if artist.get('name')])
|
||||
album_artist_names = ", ".join([artist['name'] for artist in track.get('album', {}).get('artists', []) if artist.get('name')])
|
||||
|
||||
tracks_to_insert.append((
|
||||
track['id'],
|
||||
track.get('name', 'N/A'),
|
||||
artist_names,
|
||||
track.get('album', {}).get('name', 'N/A'),
|
||||
album_artist_names,
|
||||
track.get('track_number'),
|
||||
track.get('album', {}).get('id'),
|
||||
track.get('duration_ms'),
|
||||
track_item.get('added_at'), # From playlist item
|
||||
current_time, # added_to_db
|
||||
1, # is_present_in_spotify
|
||||
current_time # last_seen_in_spotify
|
||||
))
|
||||
|
||||
if not tracks_to_insert:
|
||||
logger.info(f"No valid tracks to insert for playlist {playlist_spotify_id}.")
|
||||
return
|
||||
|
||||
try:
|
||||
with _get_playlists_db_connection() as conn: # Use playlists connection
|
||||
cursor = conn.cursor()
|
||||
_create_playlist_tracks_table(playlist_spotify_id) # Ensure table exists
|
||||
|
||||
cursor.executemany(f"""
|
||||
INSERT OR REPLACE INTO {table_name}
|
||||
(spotify_track_id, title, artist_names, album_name, album_artist_names, track_number, album_spotify_id, duration_ms, added_at_playlist, added_to_db, is_present_in_spotify, last_seen_in_spotify)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""", tracks_to_insert)
|
||||
conn.commit()
|
||||
logger.info(f"Added/updated {len(tracks_to_insert)} tracks in DB for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}.")
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error adding tracks to playlist {playlist_spotify_id} in table {table_name} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True)
|
||||
# Not raising here to allow other operations to continue if one batch fails.
|
||||
|
||||
def mark_tracks_as_not_present_in_spotify(playlist_spotify_id: str, track_ids_to_mark: list):
|
||||
"""Marks specified tracks as not present in the Spotify playlist anymore in playlists.db."""
|
||||
table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
|
||||
if not track_ids_to_mark:
|
||||
return
|
||||
try:
|
||||
with _get_playlists_db_connection() as conn: # Use playlists connection
|
||||
cursor = conn.cursor()
|
||||
placeholders = ','.join('?' for _ in track_ids_to_mark)
|
||||
sql = f"UPDATE {table_name} SET is_present_in_spotify = 0 WHERE spotify_track_id IN ({placeholders})"
|
||||
cursor.execute(sql, track_ids_to_mark)
|
||||
conn.commit()
|
||||
logger.info(f"Marked {cursor.rowcount} tracks as not present in Spotify for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}.")
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error marking tracks as not present for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True)
|
||||
|
||||
def add_specific_tracks_to_playlist_table(playlist_spotify_id: str, track_details_list: list):
|
||||
"""
|
||||
Adds specific tracks (with full details fetched separately) to the playlist's table.
|
||||
This is used when a user manually marks tracks as "downloaded" or "known".
|
||||
"""
|
||||
table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
|
||||
if not track_details_list:
|
||||
return
|
||||
|
||||
current_time = int(time.time())
|
||||
tracks_to_insert = []
|
||||
|
||||
for track in track_details_list: # track here is assumed to be a full Spotify TrackObject
|
||||
if not track or not track.get('id'):
|
||||
logger.warning(f"Skipping track due to missing data or ID (manual add) in playlist {playlist_spotify_id}: {track}")
|
||||
continue
|
||||
|
||||
artist_names = ", ".join([artist['name'] for artist in track.get('artists', []) if artist.get('name')])
|
||||
album_artist_names = ", ".join([artist['name'] for artist in track.get('album', {}).get('artists', []) if artist.get('name')])
|
||||
|
||||
tracks_to_insert.append((
|
||||
track['id'],
|
||||
track.get('name', 'N/A'),
|
||||
artist_names,
|
||||
track.get('album', {}).get('name', 'N/A'),
|
||||
album_artist_names,
|
||||
track.get('track_number'),
|
||||
track.get('album', {}).get('id'),
|
||||
track.get('duration_ms'),
|
||||
None, # added_at_playlist - not known for manually added tracks this way
|
||||
current_time, # added_to_db
|
||||
1, # is_present_in_spotify (assume user wants it considered present)
|
||||
current_time # last_seen_in_spotify
|
||||
))
|
||||
|
||||
if not tracks_to_insert:
|
||||
logger.info(f"No valid tracks to insert (manual add) for playlist {playlist_spotify_id}.")
|
||||
return
|
||||
|
||||
try:
|
||||
with _get_playlists_db_connection() as conn: # Use playlists connection
|
||||
cursor = conn.cursor()
|
||||
_create_playlist_tracks_table(playlist_spotify_id) # Ensure table exists
|
||||
cursor.executemany(f"""
|
||||
INSERT OR REPLACE INTO {table_name}
|
||||
(spotify_track_id, title, artist_names, album_name, album_artist_names, track_number, album_spotify_id, duration_ms, added_at_playlist, added_to_db, is_present_in_spotify, last_seen_in_spotify)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""", tracks_to_insert)
|
||||
conn.commit()
|
||||
logger.info(f"Manually added/updated {len(tracks_to_insert)} tracks in DB for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}.")
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error manually adding tracks to playlist {playlist_spotify_id} in table {table_name} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True)
|
||||
|
||||
def remove_specific_tracks_from_playlist_table(playlist_spotify_id: str, track_spotify_ids: list):
|
||||
"""Removes specific tracks from the playlist's local track table."""
|
||||
table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
|
||||
if not track_spotify_ids:
|
||||
return 0
|
||||
|
||||
try:
|
||||
with _get_playlists_db_connection() as conn:
|
||||
cursor = conn.cursor()
|
||||
placeholders = ','.join('?' for _ in track_spotify_ids)
|
||||
# Check if table exists first
|
||||
cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';")
|
||||
if cursor.fetchone() is None:
|
||||
logger.warning(f"Track table {table_name} does not exist. Cannot remove tracks.")
|
||||
return 0
|
||||
|
||||
cursor.execute(f"DELETE FROM {table_name} WHERE spotify_track_id IN ({placeholders})", track_spotify_ids)
|
||||
conn.commit()
|
||||
deleted_count = cursor.rowcount
|
||||
logger.info(f"Manually removed {deleted_count} tracks from DB for playlist {playlist_spotify_id}.")
|
||||
return deleted_count
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error manually removing tracks for playlist {playlist_spotify_id} from table {table_name}: {e}", exc_info=True)
|
||||
return 0
|
||||
|
||||
def add_single_track_to_playlist_db(playlist_spotify_id: str, track_item_for_db: dict):
|
||||
"""Adds or updates a single track in the specified playlist's tracks table in playlists.db."""
|
||||
table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
|
||||
track_detail = track_item_for_db.get('track')
|
||||
if not track_detail or not track_detail.get('id'):
|
||||
logger.warning(f"Skipping single track due to missing data for playlist {playlist_spotify_id}: {track_item_for_db}")
|
||||
return
|
||||
|
||||
current_time = int(time.time())
|
||||
artist_names = ", ".join([a['name'] for a in track_detail.get('artists', []) if a.get('name')])
|
||||
album_artist_names = ", ".join([a['name'] for a in track_detail.get('album', {}).get('artists', []) if a.get('name')])
|
||||
|
||||
track_data_tuple = (
|
||||
track_detail['id'],
|
||||
track_detail.get('name', 'N/A'),
|
||||
artist_names,
|
||||
track_detail.get('album', {}).get('name', 'N/A'),
|
||||
album_artist_names,
|
||||
track_detail.get('track_number'),
|
||||
track_detail.get('album', {}).get('id'),
|
||||
track_detail.get('duration_ms'),
|
||||
track_item_for_db.get('added_at'),
|
||||
current_time,
|
||||
1,
|
||||
current_time
|
||||
)
|
||||
try:
|
||||
with _get_playlists_db_connection() as conn: # Use playlists connection
|
||||
cursor = conn.cursor()
|
||||
_create_playlist_tracks_table(playlist_spotify_id)
|
||||
cursor.execute(f"""
|
||||
INSERT OR REPLACE INTO {table_name}
|
||||
(spotify_track_id, title, artist_names, album_name, album_artist_names, track_number, album_spotify_id, duration_ms, added_at_playlist, added_to_db, is_present_in_spotify, last_seen_in_spotify)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""", track_data_tuple)
|
||||
conn.commit()
|
||||
logger.info(f"Track '{track_detail.get('name')}' added/updated in DB for playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}.")
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error adding single track to playlist {playlist_spotify_id} in {PLAYLISTS_DB_PATH}: {e}", exc_info=True)
|
||||
|
||||
# --- Artist Watch Database Functions ---
|
||||
|
||||
def init_artists_db():
    """Initializes the artists database and creates the watched_artists table if it doesn't exist."""
    # DDL is idempotent (IF NOT EXISTS), so this is safe to call at every startup.
    ddl = """
        CREATE TABLE IF NOT EXISTS watched_artists (
            spotify_id TEXT PRIMARY KEY,
            name TEXT,
            total_albums_on_spotify INTEGER,
            last_checked INTEGER,
            added_at INTEGER,
            is_active INTEGER DEFAULT 1,
            last_known_status TEXT,
            last_task_id TEXT
        )
    """
    try:
        with _get_artists_db_connection() as conn:
            conn.cursor().execute(ddl)
            conn.commit()
            logger.info(f"Artists database initialized successfully at {ARTISTS_DB_PATH}")
    except sqlite3.Error as e:
        logger.error(f"Error initializing watched_artists table in {ARTISTS_DB_PATH}: {e}", exc_info=True)
        raise
|
||||
|
||||
def _create_artist_albums_table(artist_spotify_id: str):
    """Creates a table for a specific artist to store its albums if it doesn't exist in artists.db."""
    # One table per watched artist; hyphens are not valid in identifiers, so map them to underscores.
    table_name = f"artist_{artist_spotify_id.replace('-', '_')}_albums"
    ddl = f"""
        CREATE TABLE IF NOT EXISTS {table_name} (
            album_spotify_id TEXT PRIMARY KEY,
            name TEXT,
            album_group TEXT,
            album_type TEXT,
            release_date TEXT,
            total_tracks INTEGER,
            added_to_db_at INTEGER,
            is_download_initiated INTEGER DEFAULT 0,
            task_id TEXT,
            last_checked_for_tracks INTEGER
        )
    """
    try:
        with _get_artists_db_connection() as conn:
            conn.cursor().execute(ddl)
            conn.commit()
            logger.info(f"Albums table '{table_name}' for artist {artist_spotify_id} created or exists in {ARTISTS_DB_PATH}.")
    except sqlite3.Error as e:
        logger.error(f"Error creating artist albums table {table_name} in {ARTISTS_DB_PATH}: {e}", exc_info=True)
        raise
|
||||
|
||||
def add_artist_to_watch(artist_data: dict):
    """Adds an artist to the watched_artists table and creates its albums table in artists.db.

    Args:
        artist_data: Spotify artist object. Must contain 'id'; 'name' and
            'albums.total' are used when present.

    Raises:
        sqlite3.Error: If the database write fails.
        KeyError: If expected keys are missing from artist_data.
    """
    artist_id = artist_data.get('id')
    if not artist_id:
        logger.error("Cannot add artist to watch: Missing 'id' in artist_data.")
        return

    try:
        _create_artist_albums_table(artist_id)
        # Fix: capture a single timestamp so last_checked == added_at exactly
        # (two separate int(time.time()) calls could straddle a second boundary).
        now = int(time.time())
        with _get_artists_db_connection() as conn:
            cursor = conn.cursor()
            # NOTE: INSERT OR REPLACE on re-add resets last_known_status and
            # last_task_id to NULL because those columns are omitted here.
            cursor.execute("""
                INSERT OR REPLACE INTO watched_artists
                (spotify_id, name, total_albums_on_spotify, last_checked, added_at, is_active)
                VALUES (?, ?, ?, ?, ?, 1)
            """, (
                artist_id,
                artist_data.get('name', 'N/A'),
                artist_data.get('albums', {}).get('total', 0),
                now,
                now
            ))
            conn.commit()
            logger.info(f"Artist '{artist_data.get('name')}' ({artist_id}) added to watchlist in {ARTISTS_DB_PATH}.")
    except sqlite3.Error as e:
        logger.error(f"Error adding artist {artist_id} to watchlist in {ARTISTS_DB_PATH}: {e}", exc_info=True)
        raise
    except KeyError as e:
        logger.error(f"Missing key in artist_data for artist {artist_id}: {e}. Data: {artist_data}", exc_info=True)
        raise
|
||||
|
||||
def remove_artist_from_watch(artist_spotify_id: str):
    """Removes an artist from watched_artists and drops its albums table in artists.db."""
    table_name = f"artist_{artist_spotify_id.replace('-', '_')}_albums"
    try:
        with _get_artists_db_connection() as conn:
            cur = conn.cursor()
            # Delete the watch entry, then drop the per-artist album table in one transaction.
            cur.execute("DELETE FROM watched_artists WHERE spotify_id = ?", (artist_spotify_id,))
            cur.execute(f"DROP TABLE IF EXISTS {table_name}")
            conn.commit()
            logger.info(f"Artist {artist_spotify_id} removed from watchlist and its table '{table_name}' dropped from {ARTISTS_DB_PATH}.")
    except sqlite3.Error as e:
        logger.error(f"Error removing artist {artist_spotify_id} from watchlist in {ARTISTS_DB_PATH}: {e}", exc_info=True)
        raise
|
||||
|
||||
def get_watched_artists():
    """Retrieves all active artists from the watched_artists table in artists.db."""
    try:
        with _get_artists_db_connection() as conn:
            rows = conn.cursor().execute(
                "SELECT * FROM watched_artists WHERE is_active = 1"
            ).fetchall()
            # Rows are sqlite3.Row objects; convert to plain dicts for callers.
            return [dict(r) for r in rows]
    except sqlite3.Error as e:
        logger.error(f"Error retrieving watched artists from {ARTISTS_DB_PATH}: {e}", exc_info=True)
        return []
|
||||
|
||||
def get_watched_artist(artist_spotify_id: str):
    """Retrieves a specific artist from the watched_artists table in artists.db.

    Returns a dict for the row, or None if not found / on DB error.
    """
    try:
        with _get_artists_db_connection() as conn:
            row = conn.cursor().execute(
                "SELECT * FROM watched_artists WHERE spotify_id = ?",
                (artist_spotify_id,),
            ).fetchone()
            return None if row is None else dict(row)
    except sqlite3.Error as e:
        logger.error(f"Error retrieving artist {artist_spotify_id} from {ARTISTS_DB_PATH}: {e}", exc_info=True)
        return None
|
||||
|
||||
def update_artist_metadata_after_check(artist_spotify_id: str, total_albums_from_api: int):
    """Updates the total_albums_on_spotify and last_checked for an artist in artists.db."""
    params = (total_albums_from_api, int(time.time()), artist_spotify_id)
    try:
        with _get_artists_db_connection() as conn:
            conn.cursor().execute(
                """
                UPDATE watched_artists
                SET total_albums_on_spotify = ?, last_checked = ?
                WHERE spotify_id = ?
                """,
                params,
            )
            conn.commit()
    except sqlite3.Error as e:
        logger.error(f"Error updating metadata for artist {artist_spotify_id} in {ARTISTS_DB_PATH}: {e}", exc_info=True)
|
||||
|
||||
def get_artist_album_ids_from_db(artist_spotify_id: str):
    """Retrieves all album Spotify IDs from a specific artist's albums table in artists.db.

    Returns:
        set[str]: Album IDs found; empty set if the table is missing or on DB error.
    """
    table_name = f"artist_{artist_spotify_id.replace('-', '_')}_albums"
    album_ids = set()
    try:
        with _get_artists_db_connection() as conn:
            cursor = conn.cursor()
            # Fix: bind the table name as a parameter in the sqlite_master lookup
            # instead of interpolating it into the SQL text.
            cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table_name,))
            if cursor.fetchone() is None:
                logger.warning(f"Album table {table_name} for artist {artist_spotify_id} does not exist in {ARTISTS_DB_PATH}. Cannot fetch album IDs.")
                return album_ids
            cursor.execute(f"SELECT album_spotify_id FROM {table_name}")
            album_ids.update(row['album_spotify_id'] for row in cursor.fetchall())
            return album_ids
    except sqlite3.Error as e:
        logger.error(f"Error retrieving album IDs for artist {artist_spotify_id} from {ARTISTS_DB_PATH}: {e}", exc_info=True)
        return album_ids
|
||||
|
||||
def add_or_update_album_for_artist(artist_spotify_id: str, album_data: dict, task_id: str = None, is_download_complete: bool = False):
    """Adds or updates an album in the specified artist's albums table in artists.db.

    Status encoding for is_download_initiated:
        0 = not initiated, 1 = download in progress (task_id set, not complete),
        2 = download complete.

    Args:
        artist_spotify_id: Spotify ID of the watched artist (selects the per-artist table).
        album_data: Spotify album object; must contain 'id'.
        task_id: Download task ID associated with this album, if any.
        is_download_complete: True once the download task has finished.
    """
    table_name = f"artist_{artist_spotify_id.replace('-', '_')}_albums"
    album_id = album_data.get('id')
    if not album_id:
        logger.warning(f"Skipping album for artist {artist_spotify_id} due to missing album ID: {album_data}")
        return

    # Derive the status value from the two flags (see encoding in the docstring).
    download_status = 0
    if task_id and not is_download_complete:
        download_status = 1
    elif is_download_complete:
        download_status = 2

    current_time = int(time.time())
    # Tuple for the INSERT path; order must match the column list in the INSERT below.
    album_tuple = (
        album_id,
        album_data.get('name', 'N/A'),
        album_data.get('album_group', 'N/A'),
        album_data.get('album_type', 'N/A'),
        album_data.get('release_date'),
        album_data.get('total_tracks'),
        current_time,
        download_status,
        task_id
    )
    try:
        with _get_artists_db_connection() as conn:
            cursor = conn.cursor()
            # Ensure the per-artist table exists. NOTE(review): this helper opens
            # its own DB connection while `conn` is already open — appears to work,
            # but confirm there is no locking issue under concurrent writers.
            _create_artist_albums_table(artist_spotify_id)

            # Existing rows are UPDATEd (preserving the original added_to_db_at);
            # new rows are INSERTed with the full tuple above.
            cursor.execute(f"SELECT added_to_db_at FROM {table_name} WHERE album_spotify_id = ?", (album_id,))
            existing_row = cursor.fetchone()

            if existing_row:
                update_tuple = (
                    album_data.get('name', 'N/A'),
                    album_data.get('album_group', 'N/A'),
                    album_data.get('album_type', 'N/A'),
                    album_data.get('release_date'),
                    album_data.get('total_tracks'),
                    download_status,
                    task_id,
                    album_id
                )
                cursor.execute(f"""
                    UPDATE {table_name} SET
                    name = ?, album_group = ?, album_type = ?, release_date = ?, total_tracks = ?,
                    is_download_initiated = ?, task_id = ?
                    WHERE album_spotify_id = ?
                """, update_tuple)
                logger.info(f"Updated album '{album_data.get('name')}' in DB for artist {artist_spotify_id} in {ARTISTS_DB_PATH}.")
            else:
                cursor.execute(f"""
                    INSERT INTO {table_name}
                    (album_spotify_id, name, album_group, album_type, release_date, total_tracks, added_to_db_at, is_download_initiated, task_id)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
                """, album_tuple)
                logger.info(f"Added album '{album_data.get('name')}' to DB for artist {artist_spotify_id} in {ARTISTS_DB_PATH}.")
            conn.commit()
    except sqlite3.Error as e:
        logger.error(f"Error adding/updating album {album_id} for artist {artist_spotify_id} in {ARTISTS_DB_PATH}: {e}", exc_info=True)
|
||||
|
||||
def update_album_download_status_for_artist(artist_spotify_id: str, album_spotify_id: str, task_id: str, status: int):
    """Updates the download status (is_download_initiated) and task_id for a specific album of an artist in artists.db."""
    table_name = f"artist_{artist_spotify_id.replace('-', '_')}_albums"
    try:
        with _get_artists_db_connection() as conn:
            cur = conn.cursor()
            cur.execute(
                f"""
                UPDATE {table_name}
                SET is_download_initiated = ?, task_id = ?
                WHERE album_spotify_id = ?
                """,
                (status, task_id, album_spotify_id),
            )
            # rowcount tells us whether the album row actually existed.
            if cur.rowcount:
                logger.info(f"Updated download status to {status} for album {album_spotify_id} (task: {task_id}) for artist {artist_spotify_id} in {ARTISTS_DB_PATH}.")
            else:
                logger.warning(f"Attempted to update download status for non-existent album {album_spotify_id} for artist {artist_spotify_id} in {ARTISTS_DB_PATH}.")
            conn.commit()
    except sqlite3.Error as e:
        logger.error(f"Error updating album download status for album {album_spotify_id}, artist {artist_spotify_id} in {ARTISTS_DB_PATH}: {e}", exc_info=True)
|
||||
|
||||
def add_specific_albums_to_artist_table(artist_spotify_id: str, album_details_list: list):
    """
    Adds specific albums (with full details fetched separately) to the artist's album table.
    This can be used when a user manually marks albums as "known" or "processed".

    Each album is recorded via add_or_update_album_for_artist with task_id=None and
    is_download_complete=True, i.e. is_download_initiated = 2 ("download complete" /
    already processed), so the watch loop will not queue it again. (The previous
    docstring claimed a distinct status 3 for manual adds; the code has always
    written status 2 — a dedicated "manually known" status would require extending
    add_or_update_album_for_artist.)

    Args:
        artist_spotify_id: Spotify ID of the watched artist.
        album_details_list: List of full Spotify album objects (each must have 'id').

    Returns:
        int: Number of albums successfully added/updated.
    """
    if not album_details_list:
        logger.info(f"No album details provided to add specifically for artist {artist_spotify_id}.")
        return 0

    processed_count = 0
    for album_data in album_details_list:
        if not album_data or not album_data.get('id'):
            logger.warning(f"Skipping album due to missing data or ID (manual add) for artist {artist_spotify_id}: {album_data}")
            continue
        try:
            # task_id=None + is_download_complete=True -> is_download_initiated = 2.
            add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=None, is_download_complete=True)
            processed_count += 1
        except Exception as e:
            logger.error(f"Error manually adding album {album_data.get('id')} for artist {artist_spotify_id}: {e}", exc_info=True)

    logger.info(f"Manually added/updated {processed_count} albums in DB for artist {artist_spotify_id} in {ARTISTS_DB_PATH}.")
    return processed_count
|
||||
|
||||
def remove_specific_albums_from_artist_table(artist_spotify_id: str, album_spotify_ids: list):
    """Removes specific albums from the artist's local album table.

    Args:
        artist_spotify_id: Spotify ID of the watched artist.
        album_spotify_ids: Album IDs to delete from the per-artist table.

    Returns:
        int: Number of rows actually deleted (0 if the table is missing or on error).
    """
    table_name = f"artist_{artist_spotify_id.replace('-', '_')}_albums"
    if not album_spotify_ids:
        return 0

    try:
        with _get_artists_db_connection() as conn:
            cursor = conn.cursor()
            # Check if table exists first. Fix: bind the table name as a parameter
            # instead of interpolating it into the SQL text.
            cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table_name,))
            if cursor.fetchone() is None:
                logger.warning(f"Album table {table_name} for artist {artist_spotify_id} does not exist. Cannot remove albums.")
                return 0

            placeholders = ','.join('?' for _ in album_spotify_ids)
            cursor.execute(f"DELETE FROM {table_name} WHERE album_spotify_id IN ({placeholders})", album_spotify_ids)
            # Capture rowcount before commit so it unambiguously reflects this DELETE.
            deleted_count = cursor.rowcount
            conn.commit()
            logger.info(f"Manually removed {deleted_count} albums from DB for artist {artist_spotify_id}.")
            return deleted_count
    except sqlite3.Error as e:
        logger.error(f"Error manually removing albums for artist {artist_spotify_id} from table {table_name}: {e}", exc_info=True)
        return 0
|
||||
|
||||
def is_track_in_playlist_db(playlist_spotify_id: str, track_spotify_id: str) -> bool:
    """Checks if a specific track Spotify ID exists in the given playlist's tracks table.

    Returns False when the playlist's table does not exist or on any DB error.
    """
    table_name = f"playlist_{playlist_spotify_id.replace('-', '_')}"
    try:
        with _get_playlists_db_connection() as conn:
            cursor = conn.cursor()
            # First, check if the table exists to prevent errors on non-watched or
            # new playlists. Fix: bind the table name as a parameter instead of
            # interpolating it into the SQL text.
            cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table_name,))
            if cursor.fetchone() is None:
                return False  # Table doesn't exist, so track cannot be in it

            cursor.execute(f"SELECT 1 FROM {table_name} WHERE spotify_track_id = ?", (track_spotify_id,))
            return cursor.fetchone() is not None
    except sqlite3.Error as e:
        logger.error(f"Error checking if track {track_spotify_id} is in playlist {playlist_spotify_id} DB: {e}", exc_info=True)
        return False  # Assume not present on error
|
||||
|
||||
def is_album_in_artist_db(artist_spotify_id: str, album_spotify_id: str) -> bool:
    """Checks if a specific album Spotify ID exists in the given artist's albums table.

    Returns False when the artist's table does not exist or on any DB error.
    """
    table_name = f"artist_{artist_spotify_id.replace('-', '_')}_albums"
    try:
        with _get_artists_db_connection() as conn:
            cursor = conn.cursor()
            # First, check if the table exists. Fix: bind the table name as a
            # parameter instead of interpolating it into the SQL text.
            cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table_name,))
            if cursor.fetchone() is None:
                return False  # Table doesn't exist

            cursor.execute(f"SELECT 1 FROM {table_name} WHERE album_spotify_id = ?", (album_spotify_id,))
            return cursor.fetchone() is not None
    except sqlite3.Error as e:
        logger.error(f"Error checking if album {album_spotify_id} is in artist {artist_spotify_id} DB: {e}", exc_info=True)
        return False  # Assume not present on error
|
||||
415
routes/utils/watch/manager.py
Normal file
415
routes/utils/watch/manager.py
Normal file
@@ -0,0 +1,415 @@
|
||||
import time
|
||||
import threading
|
||||
import logging
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
from routes.utils.watch.db import (
|
||||
get_watched_playlists,
|
||||
get_watched_playlist,
|
||||
get_playlist_track_ids_from_db,
|
||||
add_tracks_to_playlist_db,
|
||||
update_playlist_snapshot,
|
||||
mark_tracks_as_not_present_in_spotify,
|
||||
# Artist watch DB functions
|
||||
init_artists_db,
|
||||
get_watched_artists,
|
||||
get_watched_artist,
|
||||
get_artist_album_ids_from_db,
|
||||
add_or_update_album_for_artist, # Renamed from add_album_to_artist_db
|
||||
update_artist_metadata_after_check # Renamed from update_artist_metadata
|
||||
)
|
||||
from routes.utils.get_info import get_spotify_info # To fetch playlist, track, artist, and album details
|
||||
from routes.utils.celery_queue_manager import download_queue_manager, get_config_params
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
CONFIG_PATH = Path('./data/config/watch.json')
|
||||
STOP_EVENT = threading.Event()
|
||||
|
||||
DEFAULT_WATCH_CONFIG = {
|
||||
"watchPollIntervalSeconds": 3600,
|
||||
"max_tracks_per_run": 50, # For playlists
|
||||
"watchedArtistAlbumGroup": ["album", "single"], # Default for artists
|
||||
"delay_between_playlists_seconds": 2,
|
||||
"delay_between_artists_seconds": 5 # Added for artists
|
||||
}
|
||||
|
||||
def get_watch_config():
    """Loads the watch configuration from watch.json.

    The returned dict is guaranteed to contain every key of DEFAULT_WATCH_CONFIG.
    If the file does not exist it is created with the defaults. On any error the
    defaults are returned as a fallback.

    Returns:
        dict: The watch configuration.
    """
    try:
        if CONFIG_PATH.exists():
            with open(CONFIG_PATH, 'r') as f:
                config = json.load(f)
            # Ensure all default keys are present
            for key, value in DEFAULT_WATCH_CONFIG.items():
                config.setdefault(key, value)
            return config
        else:
            # Create a default config if it doesn't exist.
            # Fix: ensure the parent directory exists, otherwise open() raises
            # FileNotFoundError on a fresh install.
            CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
            with open(CONFIG_PATH, 'w') as f:
                json.dump(DEFAULT_WATCH_CONFIG, f, indent=2)
            logger.info(f"Created default watch config at {CONFIG_PATH}")
            # Fix: return a copy so callers cannot mutate the module-level defaults.
            return dict(DEFAULT_WATCH_CONFIG)
    except Exception as e:
        logger.error(f"Error loading watch config: {e}", exc_info=True)
        return dict(DEFAULT_WATCH_CONFIG)  # Fallback (copy, not the shared default)
|
||||
|
||||
def construct_spotify_url(item_id, item_type="track"):
    """Return the canonical open.spotify.com URL for a Spotify item (track/album/playlist/...)."""
    return "/".join(("https://open.spotify.com", item_type, f"{item_id}"))
|
||||
|
||||
def check_watched_playlists(specific_playlist_id: str = None):
    """Checks watched playlists for new tracks and queues downloads.
    If specific_playlist_id is provided, only that playlist is checked.

    For each playlist: fetch all track items from Spotify (paginated), diff
    against the local DB, queue downloads for new tracks, refresh existing rows,
    mark removed tracks, and update the stored snapshot_id.
    """
    logger.info(f"Playlist Watch Manager: Starting check. Specific playlist: {specific_playlist_id or 'All'}")
    config = get_watch_config()

    if specific_playlist_id:
        playlist_obj = get_watched_playlist(specific_playlist_id)
        if not playlist_obj:
            logger.error(f"Playlist Watch Manager: Playlist {specific_playlist_id} not found in watch database.")
            return
        watched_playlists_to_check = [playlist_obj]
    else:
        watched_playlists_to_check = get_watched_playlists()

    if not watched_playlists_to_check:
        logger.info("Playlist Watch Manager: No playlists to check.")
        return

    for playlist_in_db in watched_playlists_to_check:
        playlist_spotify_id = playlist_in_db['spotify_id']
        playlist_name = playlist_in_db['name']
        logger.info(f"Playlist Watch Manager: Checking playlist '{playlist_name}' ({playlist_spotify_id})...")

        try:
            # Initial fetch is used only for snapshot_id and total track count;
            # the items themselves are collected by the pagination loop below.
            current_playlist_data_from_api = get_spotify_info(playlist_spotify_id, "playlist")
            if not current_playlist_data_from_api or 'tracks' not in current_playlist_data_from_api:
                logger.error(f"Playlist Watch Manager: Failed to fetch data or tracks from Spotify for playlist {playlist_spotify_id}.")
                continue

            api_snapshot_id = current_playlist_data_from_api.get('snapshot_id')
            api_total_tracks = current_playlist_data_from_api.get('tracks', {}).get('total', 0)

            # Paginate through playlist tracks if necessary
            all_api_track_items = []
            offset = 0
            limit = 50  # Spotify API limit for playlist items

            while True:
                # Re-fetch page by page; the loop ends when the API reports no
                # 'next' page or an empty/invalid page is returned.
                # NOTE(review): assumes get_spotify_info("playlist") honors
                # offset/limit kwargs — confirm against its implementation.
                paginated_playlist_data = get_spotify_info(playlist_spotify_id, "playlist", offset=offset, limit=limit)
                if not paginated_playlist_data or 'tracks' not in paginated_playlist_data:
                    break

                page_items = paginated_playlist_data.get('tracks', {}).get('items', [])
                if not page_items:
                    break
                all_api_track_items.extend(page_items)

                if paginated_playlist_data.get('tracks', {}).get('next'):
                    offset += limit
                else:
                    break

            # Build the set of track IDs currently on Spotify plus a lookup map
            # back to the full playlist item (local files are excluded).
            current_api_track_ids = set()
            api_track_id_to_item_map = {}
            for item in all_api_track_items:
                track = item.get('track')
                if track and track.get('id') and not track.get('is_local'):
                    track_id = track['id']
                    current_api_track_ids.add(track_id)
                    api_track_id_to_item_map[track_id] = item

            db_track_ids = get_playlist_track_ids_from_db(playlist_spotify_id)

            # New = present on Spotify but not yet in the local DB.
            new_track_ids_for_download = current_api_track_ids - db_track_ids
            queued_for_download_count = 0
            if new_track_ids_for_download:
                logger.info(f"Playlist Watch Manager: Found {len(new_track_ids_for_download)} new tracks for playlist '{playlist_name}' to download.")
                for track_id in new_track_ids_for_download:
                    api_item = api_track_id_to_item_map.get(track_id)
                    if not api_item or not api_item.get("track"):
                        logger.warning(f"Playlist Watch Manager: Missing track details in API map for new track_id {track_id} in playlist {playlist_spotify_id}. Cannot queue.")
                        continue

                    track_to_queue = api_item["track"]
                    task_payload = {
                        "download_type": "track",
                        "url": construct_spotify_url(track_id, "track"),
                        "name": track_to_queue.get('name', 'Unknown Track'),
                        "artist": ", ".join([a['name'] for a in track_to_queue.get('artists', []) if a.get('name')]),
                        "orig_request": {
                            "source": "playlist_watch",
                            "playlist_id": playlist_spotify_id,
                            "playlist_name": playlist_name,
                            "track_spotify_id": track_id,
                            # Full API item so the DB can be updated when the download completes.
                            "track_item_for_db": api_item
                        }
                    }
                    try:
                        # from_watch_job=True: duplicates are skipped (returns None)
                        # rather than recorded as error tasks.
                        task_id_or_none = download_queue_manager.add_task(task_payload, from_watch_job=True)
                        if task_id_or_none:  # Task was newly queued
                            logger.info(f"Playlist Watch Manager: Queued download task {task_id_or_none} for new track {track_id} ('{track_to_queue.get('name')}') from playlist '{playlist_name}'.")
                            queued_for_download_count += 1
                        # None means duplicate; the queue manager logs that case itself.
                    except Exception as e:
                        logger.error(f"Playlist Watch Manager: Failed to queue download for new track {track_id} from playlist '{playlist_name}': {e}", exc_info=True)
                logger.info(f"Playlist Watch Manager: Attempted to queue {queued_for_download_count} new tracks for playlist '{playlist_name}'.")
            else:
                logger.info(f"Playlist Watch Manager: No new tracks to download for playlist '{playlist_name}'.")

            # Refresh all currently-present tracks in the local DB so their
            # last_seen_in_spotify / is_present_in_spotify fields stay current
            # (add_tracks_to_playlist_db uses INSERT OR REPLACE).
            if all_api_track_items:
                logger.info(f"Playlist Watch Manager: Refreshing {len(all_api_track_items)} tracks from API in local DB for playlist '{playlist_name}'.")
                add_tracks_to_playlist_db(playlist_spotify_id, all_api_track_items)

            # Removed = in local DB but no longer on Spotify; flag rather than delete.
            removed_db_ids = db_track_ids - current_api_track_ids
            if removed_db_ids:
                logger.info(f"Playlist Watch Manager: {len(removed_db_ids)} tracks removed from Spotify playlist '{playlist_name}'. Marking in DB.")
                mark_tracks_as_not_present_in_spotify(playlist_spotify_id, list(removed_db_ids))

            update_playlist_snapshot(playlist_spotify_id, api_snapshot_id, api_total_tracks)  # snapshot/total from the initial fetch
            logger.info(f"Playlist Watch Manager: Finished checking playlist '{playlist_name}'. Snapshot ID updated. API Total Tracks: {api_total_tracks}.")

        except Exception as e:
            logger.error(f"Playlist Watch Manager: Error processing playlist {playlist_spotify_id}: {e}", exc_info=True)

        # Throttle between playlists (minimum 1 second) to be gentle on the API.
        time.sleep(max(1, config.get("delay_between_playlists_seconds", 2)))

    logger.info("Playlist Watch Manager: Finished checking all watched playlists.")
|
||||
|
||||
def check_watched_artists(specific_artist_id: str = None):
|
||||
"""Checks watched artists for new albums and queues downloads."""
|
||||
logger.info(f"Artist Watch Manager: Starting check. Specific artist: {specific_artist_id or 'All'}")
|
||||
config = get_watch_config()
|
||||
watched_album_groups = [g.lower() for g in config.get("watchedArtistAlbumGroup", ["album", "single"])]
|
||||
logger.info(f"Artist Watch Manager: Watching for album groups: {watched_album_groups}")
|
||||
|
||||
if specific_artist_id:
|
||||
artist_obj_in_db = get_watched_artist(specific_artist_id)
|
||||
if not artist_obj_in_db:
|
||||
logger.error(f"Artist Watch Manager: Artist {specific_artist_id} not found in watch database.")
|
||||
return
|
||||
artists_to_check = [artist_obj_in_db]
|
||||
else:
|
||||
artists_to_check = get_watched_artists()
|
||||
|
||||
if not artists_to_check:
|
||||
logger.info("Artist Watch Manager: No artists to check.")
|
||||
return
|
||||
|
||||
for artist_in_db in artists_to_check:
|
||||
artist_spotify_id = artist_in_db['spotify_id']
|
||||
artist_name = artist_in_db['name']
|
||||
logger.info(f"Artist Watch Manager: Checking artist '{artist_name}' ({artist_spotify_id})...")
|
||||
|
||||
try:
|
||||
# Spotify API for artist albums is paginated.
|
||||
# We need to fetch all albums. get_spotify_info with type 'artist-albums' should handle this.
|
||||
# Let's assume get_spotify_info(artist_id, 'artist-albums') returns a list of all album objects.
|
||||
# Or we implement pagination here.
|
||||
|
||||
all_artist_albums_from_api = []
|
||||
offset = 0
|
||||
limit = 50 # Spotify API limit for artist albums
|
||||
while True:
|
||||
# The 'artist-albums' type for get_spotify_info needs to support pagination params.
|
||||
# And return a list of album objects.
|
||||
logger.debug(f"Artist Watch Manager: Fetching albums for {artist_spotify_id}. Limit: {limit}, Offset: {offset}")
|
||||
artist_albums_page = get_spotify_info(artist_spotify_id, "artist", limit=limit, offset=offset)
|
||||
|
||||
if not artist_albums_page or not isinstance(artist_albums_page.get('items'), list):
|
||||
logger.warning(f"Artist Watch Manager: No album items found or invalid format for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Response: {artist_albums_page}")
|
||||
break
|
||||
|
||||
current_page_albums = artist_albums_page.get('items', [])
|
||||
if not current_page_albums:
|
||||
logger.info(f"Artist Watch Manager: No more albums on page for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Total fetched so far: {len(all_artist_albums_from_api)}.")
|
||||
break
|
||||
|
||||
logger.debug(f"Artist Watch Manager: Fetched {len(current_page_albums)} albums on current page for artist '{artist_name}'.")
|
||||
all_artist_albums_from_api.extend(current_page_albums)
|
||||
|
||||
# Correct pagination: Check if Spotify indicates a next page URL
|
||||
# The `next` field in Spotify API responses is a URL to the next page or null.
|
||||
if artist_albums_page.get('next'):
|
||||
offset += limit # CORRECT: Increment offset by the limit used for the request
|
||||
else:
|
||||
logger.info(f"Artist Watch Manager: No 'next' page URL for artist '{artist_name}'. Pagination complete. Total albums fetched: {len(all_artist_albums_from_api)}.")
|
||||
break
|
||||
|
||||
# total_albums_from_api = len(all_artist_albums_from_api)
|
||||
# Use the 'total' field from the API response for a more accurate count of all available albums (matching current API filter if any)
|
||||
api_reported_total_albums = artist_albums_page.get('total', 0) if 'artist_albums_page' in locals() and artist_albums_page else len(all_artist_albums_from_api)
|
||||
logger.info(f"Artist Watch Manager: Fetched {len(all_artist_albums_from_api)} albums in total from API for artist '{artist_name}'. API reports total: {api_reported_total_albums}.")
|
||||
|
||||
db_album_ids = get_artist_album_ids_from_db(artist_spotify_id)
|
||||
logger.info(f"Artist Watch Manager: Found {len(db_album_ids)} albums in DB for artist '{artist_name}'. These will be skipped if re-encountered unless logic changes.")
|
||||
|
||||
queued_for_download_count = 0
|
||||
processed_album_ids_in_run = set() # To avoid processing duplicate album_ids if API returns them across pages (should not happen with correct pagination)
|
||||
|
||||
for album_data in all_artist_albums_from_api:
|
||||
album_id = album_data.get('id')
|
||||
album_name = album_data.get('name', 'Unknown Album')
|
||||
album_group = album_data.get('album_group', 'N/A').lower()
|
||||
album_type = album_data.get('album_type', 'N/A').lower()
|
||||
|
||||
if not album_id:
|
||||
logger.warning(f"Artist Watch Manager: Skipping album without ID for artist '{artist_name}'. Album data: {album_data}")
|
||||
continue
|
||||
|
||||
if album_id in processed_album_ids_in_run:
|
||||
logger.debug(f"Artist Watch Manager: Album '{album_name}' ({album_id}) already processed in this run. Skipping.")
|
||||
continue
|
||||
processed_album_ids_in_run.add(album_id)
|
||||
|
||||
# Filter based on watchedArtistAlbumGroup
|
||||
# The album_group field is generally preferred for this type of categorization as per Spotify docs.
|
||||
is_matching_group = album_group in watched_album_groups
|
||||
|
||||
logger.debug(f"Artist '{artist_name}', Album '{album_name}' ({album_id}): album_group='{album_group}', album_type='{album_type}'. Watched groups: {watched_album_groups}. Match: {is_matching_group}.")
|
||||
|
||||
if not is_matching_group:
|
||||
logger.debug(f"Artist Watch Manager: Skipping album '{album_name}' ({album_id}) by '{artist_name}' - group '{album_group}' not in watched list: {watched_album_groups}.")
|
||||
continue
|
||||
|
||||
logger.info(f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' (group: {album_group}) IS a matching group.")
|
||||
|
||||
if album_id not in db_album_ids:
|
||||
logger.info(f"Artist Watch Manager: Found NEW matching album '{album_name}' ({album_id}) by '{artist_name}'. Queuing for download.")
|
||||
|
||||
album_artists_list = album_data.get('artists', [])
|
||||
album_main_artist_name = album_artists_list[0].get('name', 'Unknown Artist') if album_artists_list else 'Unknown Artist'
|
||||
|
||||
task_payload = {
|
||||
"download_type": "album", # Or "track" if downloading individual tracks of album later
|
||||
"url": construct_spotify_url(album_id, "album"),
|
||||
"name": album_name,
|
||||
"artist": album_main_artist_name, # Primary artist of the album
|
||||
"orig_request": {
|
||||
"source": "artist_watch",
|
||||
"artist_spotify_id": artist_spotify_id, # Watched artist
|
||||
"artist_name": artist_name,
|
||||
"album_spotify_id": album_id,
|
||||
"album_data_for_db": album_data # Pass full API album object for DB update on completion/queuing
|
||||
}
|
||||
}
|
||||
try:
|
||||
# Add to DB first with task_id, then queue. Or queue and add task_id to DB.
|
||||
# Let's use add_or_update_album_for_artist to record it with a task_id before queuing.
|
||||
# The celery_queue_manager.add_task might return None if it's a duplicate.
|
||||
|
||||
# Record the album in DB as being processed for download
|
||||
# Task_id will be added if successfully queued
|
||||
|
||||
# We should call add_task first, and if it returns a task_id (not a duplicate), then update our DB.
|
||||
task_id_or_none = download_queue_manager.add_task(task_payload, from_watch_job=True)
|
||||
|
||||
if task_id_or_none: # Task was newly queued
|
||||
add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=task_id_or_none, is_download_complete=False)
|
||||
logger.info(f"Artist Watch Manager: Queued download task {task_id_or_none} for new album '{album_name}' from artist '{artist_name}'.")
|
||||
queued_for_download_count += 1
|
||||
# If task_id_or_none is None, it was a duplicate. We can still log/record album_data if needed, but without task_id or as already seen.
|
||||
# add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=None) # This would just log metadata if not a duplicate.
|
||||
# The current add_task logic in celery_manager might create an error task for duplicates,
|
||||
# so we might not need to do anything special here for duplicates apart from not incrementing count.
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Artist Watch Manager: Failed to queue/record download for new album {album_id} ('{album_name}') from artist '{artist_name}': {e}", exc_info=True)
|
||||
else:
|
||||
logger.info(f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' already known in DB (ID found in db_album_ids). Skipping queue.")
|
||||
# Optionally, update its entry (e.g. last_seen, or if details changed), but for now, we only queue new ones.
|
||||
# add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=None, is_download_complete=False) # would update added_to_db_at
|
||||
|
||||
logger.info(f"Artist Watch Manager: For artist '{artist_name}', processed {len(all_artist_albums_from_api)} API albums, attempted to queue {queued_for_download_count} new albums.")
|
||||
|
||||
update_artist_metadata_after_check(artist_spotify_id, api_reported_total_albums)
|
||||
logger.info(f"Artist Watch Manager: Finished checking artist '{artist_name}'. DB metadata updated. API reported total albums (for API filter): {api_reported_total_albums}.")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Artist Watch Manager: Error processing artist {artist_spotify_id} ('{artist_name}'): {e}", exc_info=True)
|
||||
|
||||
time.sleep(max(1, config.get("delay_between_artists_seconds", 5)))
|
||||
|
||||
logger.info("Artist Watch Manager: Finished checking all watched artists.")
|
||||
|
||||
def playlist_watch_scheduler():
    """Background loop that periodically runs playlist and artist watch checks.

    The watch configuration is reloaded at the start of every cycle so that
    interval changes take effect without restarting the thread. Each check is
    wrapped in its own try/except so a failure in one does not prevent the
    other from running. The loop exits when STOP_EVENT is set.
    """
    logger.info("Watch Scheduler: Thread started.")

    while not STOP_EVENT.is_set():
        # Reload config each cycle so runtime changes are picked up dynamically.
        # (Previously a config snapshot was also taken before the loop, but it
        # was never used — removed as dead code.)
        current_config = get_watch_config()
        interval = current_config.get("watchPollIntervalSeconds", 3600)

        try:
            logger.info("Watch Scheduler: Starting playlist check run.")
            check_watched_playlists()
            logger.info("Watch Scheduler: Playlist check run completed.")
        except Exception as e:
            logger.error(f"Watch Scheduler: Unhandled exception during check_watched_playlists: {e}", exc_info=True)

        # Re-check the stop flag between the two check types so shutdown
        # requested during the playlist run is honored promptly.
        if STOP_EVENT.is_set():
            break

        try:
            logger.info("Watch Scheduler: Starting artist check run.")
            check_watched_artists()
            logger.info("Watch Scheduler: Artist check run completed.")
        except Exception as e:
            logger.error(f"Watch Scheduler: Unhandled exception during check_watched_artists: {e}", exc_info=True)

        logger.info(f"Watch Scheduler: All checks complete. Next run in {interval} seconds.")
        # Event.wait returns early when STOP_EVENT is set, so shutdown is not
        # delayed by the full poll interval.
        STOP_EVENT.wait(interval)
    logger.info("Watch Scheduler: Thread stopped.")
|
||||
|
||||
# --- Global thread for the scheduler ---
# Handle to the single background scheduler thread; None when not running.
# Managed exclusively by start_watch_manager()/stop_watch_manager() below.
_watch_scheduler_thread = None # Renamed from _playlist_watch_thread
|
||||
|
||||
def start_watch_manager(): # Renamed from start_playlist_watch_manager
    """Start the background watch scheduler thread if it is not already running.

    Initializes the playlist and artist watch databases, clears the stop flag,
    and launches playlist_watch_scheduler in a daemon thread. Calling this
    while the scheduler is alive is a no-op apart from a log message.
    """
    global _watch_scheduler_thread

    # Guard clause: nothing to do if a scheduler thread is already alive.
    if _watch_scheduler_thread is not None and _watch_scheduler_thread.is_alive():
        logger.info("Watch Manager: Background scheduler already running.")
        return

    STOP_EVENT.clear()

    # Ensure both watch databases exist before the scheduler touches them.
    from routes.utils.watch.db import init_playlists_db, init_artists_db # Updated import
    init_playlists_db() # For playlists
    init_artists_db() # For artists

    worker = threading.Thread(target=playlist_watch_scheduler, daemon=True)
    worker.start()
    _watch_scheduler_thread = worker
    logger.info("Watch Manager: Background scheduler started (includes playlists and artists).")
|
||||
|
||||
def stop_watch_manager(): # Renamed from stop_playlist_watch_manager
    """Signal the background watch scheduler to stop and wait briefly for it.

    Sets STOP_EVENT and joins the scheduler thread with a 10-second timeout.
    The module-level handle is cleared regardless of whether the thread
    finished in time. A no-op (apart from a log message) if no scheduler
    thread is running.
    """
    global _watch_scheduler_thread

    thread = _watch_scheduler_thread
    # Guard clause: nothing to stop if there is no live scheduler thread.
    if not (thread and thread.is_alive()):
        logger.info("Watch Manager: Background scheduler not running.")
        return

    logger.info("Watch Manager: Stopping background scheduler...")
    STOP_EVENT.set()
    thread.join(timeout=10)
    if thread.is_alive():
        # The thread is started as a daemon, so a hung worker will not block
        # interpreter exit even if it failed to stop here.
        logger.warning("Watch Manager: Scheduler thread did not stop in time.")
    else:
        logger.info("Watch Manager: Background scheduler stopped.")
    _watch_scheduler_thread = None
|
||||
|
||||
# To auto-start the manager when this module is imported, start_watch_manager() could be called here.
# However, it is usually better to start it explicitly from the main application/__init__.py.
|
||||
Reference in New Issue
Block a user