41
.pre-commit-config.yaml
Normal file
@@ -0,0 +1,41 @@
+repos:
+  # Various general + format-specific helpers
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v5.0.0
+    hooks:
+      - id: check-symlinks
+      - id: trailing-whitespace
+      - id: mixed-line-ending
+        args: [--fix=lf]
+      - id: check-yaml
+        exclude: 'mkdocs.yml'
+      - id: check-toml
+      - id: check-json
+      - id: check-ast
+      - id: debug-statements
+      - id: check-merge-conflict
+      - id: check-shebang-scripts-are-executable
+      - id: check-added-large-files
+        args: [--maxkb=10000]
+  - repo: https://github.com/python-jsonschema/check-jsonschema
+    rev: '0.33.0'
+    hooks:
+      - id: check-github-workflows
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    # Ruff version.
+    rev: v0.11.13
+    hooks:
+      # Run the linter.
+      - id: ruff
+        types_or: [python, pyi, jupyter]
+        args: [--fix]
+      # Run the formatter.
+      - id: ruff-format
+        types_or: [python, pyi, jupyter]
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: 'v1.16.0'
+    hooks:
+      - id: mypy
+        args: [--no-strict-optional, --ignore-missing-imports]
+        # NOTE: you might need to add some deps here:
+        additional_dependencies: [waitress==3.0.2, types-waitress]
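Reviewer note: to try these hooks locally (standard pre-commit usage, not part of this change), install the tool with "pip install pre-commit", run "pre-commit install" once per clone so the hooks fire on every commit, and run "pre-commit run --all-files" to lint the whole tree the way CI would.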
108
app.py
@@ -21,19 +21,20 @@ import socket
 from urllib.parse import urlparse

 # Import Celery configuration and manager
 from routes.utils.celery_tasks import celery_app
 from routes.utils.celery_manager import celery_manager
 from routes.utils.celery_config import REDIS_URL
 from routes.utils.history_manager import init_history_db

+
 # Configure application-wide logging
 def setup_logging():
     """Configure application-wide logging with rotation"""
     # Create logs directory if it doesn't exist
-    logs_dir = Path('logs')
+    logs_dir = Path("logs")
     logs_dir.mkdir(exist_ok=True)

     # Set up log file paths
-    main_log = logs_dir / 'spotizerr.log'
+    main_log = logs_dir / "spotizerr.log"

     # Configure root logger
     root_logger = logging.getLogger()
@@ -45,13 +46,13 @@ def setup_logging():

     # Log formatting
     log_format = logging.Formatter(
-        '%(asctime)s [%(processName)s:%(threadName)s] [%(name)s] [%(levelname)s] - %(message)s',
-        datefmt='%Y-%m-%d %H:%M:%S'
+        "%(asctime)s [%(processName)s:%(threadName)s] [%(name)s] [%(levelname)s] - %(message)s",
+        datefmt="%Y-%m-%d %H:%M:%S",
     )

     # File handler with rotation (10 MB max, keep 5 backups)
     file_handler = logging.handlers.RotatingFileHandler(
-        main_log, maxBytes=10*1024*1024, backupCount=5, encoding='utf-8'
+        main_log, maxBytes=10 * 1024 * 1024, backupCount=5, encoding="utf-8"
     )
     file_handler.setFormatter(log_format)
     file_handler.setLevel(logging.INFO)
@@ -66,13 +67,13 @@ def setup_logging():
     root_logger.addHandler(console_handler)

     # Set up specific loggers
-    for logger_name in ['werkzeug', 'celery', 'routes', 'flask', 'waitress']:
+    for logger_name in ["werkzeug", "celery", "routes", "flask", "waitress"]:
         module_logger = logging.getLogger(logger_name)
         module_logger.setLevel(logging.INFO)
         # Handlers are inherited from root logger

     # Enable propagation for all loggers
-    logging.getLogger('celery').propagate = True
+    logging.getLogger("celery").propagate = True

     # Notify successful setup
     root_logger.info("Logging system initialized")
@@ -80,6 +81,7 @@ def setup_logging():

     # Return the main file handler for permissions adjustment
     return file_handler

+
 def check_redis_connection():
     """Check if Redis is reachable and retry with exponential backoff if not"""
     max_retries = 5
@@ -114,7 +116,9 @@ def check_redis_connection():
             sock.close()

             if result != 0:
-                raise ConnectionError(f"Cannot connect to Redis at {redis_host}:{redis_port}")
+                raise ConnectionError(
+                    f"Cannot connect to Redis at {redis_host}:{redis_port}"
+                )

             # If socket connection successful, try Redis ping
             r = redis.Redis.from_url(REDIS_URL)
@@ -124,8 +128,12 @@ def check_redis_connection():
         except Exception as e:
             retry_count += 1
             if retry_count >= max_retries:
-                logging.error(f"Failed to connect to Redis after {max_retries} attempts: {e}")
-                logging.error(f"Make sure Redis is running at {redis_host}:{redis_port}")
+                logging.error(
+                    f"Failed to connect to Redis after {max_retries} attempts: {e}"
+                )
+                logging.error(
+                    f"Make sure Redis is running at {redis_host}:{redis_port}"
+                )
                 return False

             logging.warning(f"Redis connection attempt {retry_count} failed: {e}")
@@ -135,71 +143,75 @@ def check_redis_connection():

     return False

+
 def create_app():
-    app = Flask(__name__, template_folder='static/html')
+    app = Flask(__name__, template_folder="static/html")

     # Set up CORS
     CORS(app)

     # Initialize databases
     init_history_db()

     # Register blueprints
-    app.register_blueprint(config_bp, url_prefix='/api')
-    app.register_blueprint(search_bp, url_prefix='/api')
-    app.register_blueprint(credentials_bp, url_prefix='/api/credentials')
-    app.register_blueprint(album_bp, url_prefix='/api/album')
-    app.register_blueprint(track_bp, url_prefix='/api/track')
-    app.register_blueprint(playlist_bp, url_prefix='/api/playlist')
-    app.register_blueprint(artist_bp, url_prefix='/api/artist')
-    app.register_blueprint(prgs_bp, url_prefix='/api/prgs')
-    app.register_blueprint(history_bp, url_prefix='/api/history')
+    app.register_blueprint(config_bp, url_prefix="/api")
+    app.register_blueprint(search_bp, url_prefix="/api")
+    app.register_blueprint(credentials_bp, url_prefix="/api/credentials")
+    app.register_blueprint(album_bp, url_prefix="/api/album")
+    app.register_blueprint(track_bp, url_prefix="/api/track")
+    app.register_blueprint(playlist_bp, url_prefix="/api/playlist")
+    app.register_blueprint(artist_bp, url_prefix="/api/artist")
+    app.register_blueprint(prgs_bp, url_prefix="/api/prgs")
+    app.register_blueprint(history_bp, url_prefix="/api/history")

     # Serve frontend
-    @app.route('/')
+    @app.route("/")
     def serve_index():
-        return render_template('main.html')
+        return render_template("main.html")

     # Config page route
-    @app.route('/config')
+    @app.route("/config")
     def serve_config():
-        return render_template('config.html')
+        return render_template("config.html")

     # New route: Serve watch.html under /watchlist
-    @app.route('/watchlist')
+    @app.route("/watchlist")
     def serve_watchlist():
-        return render_template('watch.html')
+        return render_template("watch.html")

     # New route: Serve playlist.html under /playlist/<id>
-    @app.route('/playlist/<id>')
+    @app.route("/playlist/<id>")
     def serve_playlist(id):
         # The id parameter is captured, but you can use it as needed.
-        return render_template('playlist.html')
+        return render_template("playlist.html")

-    @app.route('/album/<id>')
+    @app.route("/album/<id>")
     def serve_album(id):
         # The id parameter is captured, but you can use it as needed.
-        return render_template('album.html')
+        return render_template("album.html")

-    @app.route('/track/<id>')
+    @app.route("/track/<id>")
     def serve_track(id):
         # The id parameter is captured, but you can use it as needed.
-        return render_template('track.html')
+        return render_template("track.html")

-    @app.route('/artist/<id>')
+    @app.route("/artist/<id>")
     def serve_artist(id):
         # The id parameter is captured, but you can use it as needed.
-        return render_template('artist.html')
+        return render_template("artist.html")

-    @app.route('/history')
+    @app.route("/history")
     def serve_history_page():
-        return render_template('history.html')
+        return render_template("history.html")

-    @app.route('/static/<path:path>')
+    @app.route("/static/<path:path>")
     def serve_static(path):
-        return send_from_directory('static', path)
+        return send_from_directory("static", path)

     # Serve favicon.ico from the same directory as index.html (templates)
-    @app.route('/favicon.ico')
+    @app.route("/favicon.ico")
     def serve_favicon():
-        return send_from_directory('static/html', 'favicon.ico')
+        return send_from_directory("static/html", "favicon.ico")

     # Add request logging middleware
     @app.before_request
@@ -209,7 +221,7 @@ def create_app():

     @app.after_request
     def log_response(response):
-        if hasattr(request, 'start_time'):
+        if hasattr(request, "start_time"):
             duration = round((time.time() - request.start_time) * 1000, 2)
             app.logger.debug(f"Response: {response.status} | Duration: {duration}ms")
         return response
@@ -222,6 +234,7 @@ def create_app():

     return app

+
 def start_celery_workers():
     """Start Celery workers with dynamic configuration"""
     logging.info("Starting Celery workers with dynamic configuration")
@@ -230,15 +243,16 @@ def start_celery_workers():
     # Register shutdown handler
     atexit.register(celery_manager.stop)

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     # Configure application logging
     log_handler = setup_logging()

     # Set file permissions for log files if needed
     try:
         os.chmod(log_handler.baseFilename, 0o666)
-    except:
-        logging.warning("Could not set permissions on log file")
+    except (OSError, FileNotFoundError) as e:
+        logging.warning(f"Could not set permissions on log file: {str(e)}")

     # Log application startup
     logging.info("=== Spotizerr Application Starting ===")
@@ -247,6 +261,7 @@ if __name__ == '__main__':
     if check_redis_connection():
         # Start Watch Manager
         from routes.utils.watch.manager import start_watch_manager
+
         start_watch_manager()

         # Start Celery workers
@@ -256,7 +271,8 @@ if __name__ == '__main__':
         app = create_app()
         logging.info("Starting Flask server on port 7171")
         from waitress import serve
-        serve(app, host='0.0.0.0', port=7171)
+
+        serve(app, host="0.0.0.0", port=7171)
     else:
         logging.error("Cannot start application: Redis connection failed")
         sys.exit(1)
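Reviewer note: the hunks above only re-wrap this logic; as a self-contained reference, here is a minimal sketch of the two patterns app.py combines (a size-rotated log file and a bounded exponential-backoff reachability probe). The names make_rotating_logger and wait_for_port are illustrative, not part of app.py.

# Minimal sketch, assuming stdlib only; values mirror the diff (10 MB, 5 backups).
import logging
import logging.handlers
import socket
import time


def make_rotating_logger(path: str) -> logging.Logger:
    # Rotate the file at 10 MB, keeping 5 old copies alongside the active log.
    handler = logging.handlers.RotatingFileHandler(
        path, maxBytes=10 * 1024 * 1024, backupCount=5, encoding="utf-8"
    )
    handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"))
    logger = logging.getLogger("sketch")
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger


def wait_for_port(host: str, port: int, max_retries: int = 5) -> bool:
    # Capped exponential backoff: 1s, 2s, 4s, ... up to max_retries attempts.
    delay = 1.0
    for attempt in range(1, max_retries + 1):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(2)
            if sock.connect_ex((host, port)) == 0:
                return True  # port is reachable
        logging.warning("attempt %d failed, retrying in %.0fs", attempt, delay)
        time.sleep(delay)
        delay *= 2
    return False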

routes/__init__.py
@@ -3,22 +3,26 @@ import atexit

 # Configure basic logging for the application if not already configured
 # This is a good place for it if routes are a central part of your app structure.
-logging.basicConfig(level=logging.INFO,
-                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+)

 logger = logging.getLogger(__name__)

 try:
     from routes.utils.watch.manager import start_watch_manager, stop_watch_manager
+
     # Start the playlist watch manager when the application/blueprint is initialized
     start_watch_manager()
     # Register the stop function to be called on application exit
     atexit.register(stop_watch_manager)
     logger.info("Playlist Watch Manager initialized and registered for shutdown.")
 except ImportError as e:
-    logger.error(f"Could not import or start Playlist Watch Manager: {e}. Playlist watching will be disabled.")
+    logger.error(
+        f"Could not import or start Playlist Watch Manager: {e}. Playlist watching will be disabled."
+    )
 except Exception as e:
-    logger.error(f"An unexpected error occurred during Playlist Watch Manager setup: {e}", exc_info=True)
-
-from .artist import artist_bp
-from .prgs import prgs_bp
+    logger.error(
+        f"An unexpected error occurred during Playlist Watch Manager setup: {e}",
+        exc_info=True,
+    )
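Reviewer note: this module starts the watch manager as a side effect of the first import and registers its cleanup with atexit. A tiny sketch of that lifecycle, with start_service/stop_service standing in for the real watch manager functions:

# Illustrative sketch, not project code.
import atexit
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def start_service() -> None:
    logger.info("service started")


def stop_service() -> None:
    logger.info("service stopped")


start_service()  # runs once, as a side effect of the first import
atexit.register(stop_service)  # runs at normal interpreter shutdown

One consequence worth keeping in mind: atexit handlers fire on a clean interpreter exit, not when a worker process is killed outright.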
111
routes/album.py
@@ -1,6 +1,5 @@
 from flask import Blueprint, Response, request
 import json
-import os
 import traceback
 import uuid
 import time
@@ -8,9 +7,10 @@ from routes.utils.celery_queue_manager import download_queue_manager
 from routes.utils.celery_tasks import store_task_info, store_task_status, ProgressState
 from routes.utils.get_info import get_spotify_info

-album_bp = Blueprint('album', __name__)
+album_bp = Blueprint("album", __name__)

-@album_bp.route('/download/<album_id>', methods=['GET'])
+
+@album_bp.route("/download/<album_id>", methods=["GET"])
 def handle_download(album_id):
     # Retrieve essential parameters from the request.
     # name = request.args.get('name')
@@ -22,21 +22,33 @@ def handle_download(album_id):
     # Fetch metadata from Spotify
     try:
         album_info = get_spotify_info(album_id, "album")
-        if not album_info or not album_info.get('name') or not album_info.get('artists'):
+        if (
+            not album_info
+            or not album_info.get("name")
+            or not album_info.get("artists")
+        ):
             return Response(
-                json.dumps({"error": f"Could not retrieve metadata for album ID: {album_id}"}),
+                json.dumps(
+                    {"error": f"Could not retrieve metadata for album ID: {album_id}"}
+                ),
                 status=404,
-                mimetype='application/json'
+                mimetype="application/json",
             )

-        name_from_spotify = album_info.get('name')
-        artist_from_spotify = album_info['artists'][0].get('name') if album_info['artists'] else "Unknown Artist"
+        name_from_spotify = album_info.get("name")
+        artist_from_spotify = (
+            album_info["artists"][0].get("name")
+            if album_info["artists"]
+            else "Unknown Artist"
+        )

     except Exception as e:
         return Response(
-            json.dumps({"error": f"Failed to fetch metadata for album {album_id}: {str(e)}"}),
+            json.dumps(
+                {"error": f"Failed to fetch metadata for album {album_id}: {str(e)}"}
+            ),
             status=500,
-            mimetype='application/json'
+            mimetype="application/json",
         )

     # Validate required parameters
@@ -44,7 +56,7 @@ def handle_download(album_id):
         return Response(
             json.dumps({"error": "Missing required parameter: url"}),
             status=400,
-            mimetype='application/json'
+            mimetype="application/json",
         )

     # Add the task to the queue with only essential parameters
@@ -53,98 +65,97 @@ def handle_download(album_id):
     orig_params = request.args.to_dict()
     orig_params["original_url"] = request.url
     try:
-        task_id = download_queue_manager.add_task({
-            "download_type": "album",
-            "url": url,
-            "name": name_from_spotify,
-            "artist": artist_from_spotify,
-            "orig_request": orig_params
-        })
+        task_id = download_queue_manager.add_task(
+            {
+                "download_type": "album",
+                "url": url,
+                "name": name_from_spotify,
+                "artist": artist_from_spotify,
+                "orig_request": orig_params,
+            }
+        )
     except Exception as e:
         # Generic error handling for other issues during task submission
         # Create an error task ID if add_task itself fails before returning an ID
         error_task_id = str(uuid.uuid4())

-        store_task_info(error_task_id, {
-            "download_type": "album",
-            "url": url,
-            "name": name_from_spotify,
-            "artist": artist_from_spotify,
-            "original_request": orig_params,
-            "created_at": time.time(),
-            "is_submission_error_task": True
-        })
-        store_task_status(error_task_id, {
-            "status": ProgressState.ERROR,
-            "error": f"Failed to queue album download: {str(e)}",
-            "timestamp": time.time()
-        })
+        store_task_info(
+            error_task_id,
+            {
+                "download_type": "album",
+                "url": url,
+                "name": name_from_spotify,
+                "artist": artist_from_spotify,
+                "original_request": orig_params,
+                "created_at": time.time(),
+                "is_submission_error_task": True,
+            },
+        )
+        store_task_status(
+            error_task_id,
+            {
+                "status": ProgressState.ERROR,
+                "error": f"Failed to queue album download: {str(e)}",
+                "timestamp": time.time(),
+            },
+        )
         return Response(
-            json.dumps({"error": f"Failed to queue album download: {str(e)}", "task_id": error_task_id}),
+            json.dumps(
+                {
+                    "error": f"Failed to queue album download: {str(e)}",
+                    "task_id": error_task_id,
+                }
+            ),
             status=500,
-            mimetype='application/json'
+            mimetype="application/json",
         )

     return Response(
-        json.dumps({"prg_file": task_id}),
-        status=202,
-        mimetype='application/json'
+        json.dumps({"prg_file": task_id}), status=202, mimetype="application/json"
     )

-@album_bp.route('/download/cancel', methods=['GET'])
+
+@album_bp.route("/download/cancel", methods=["GET"])
 def cancel_download():
     """
     Cancel a running download process by its prg file name.
     """
-    prg_file = request.args.get('prg_file')
+    prg_file = request.args.get("prg_file")
     if not prg_file:
         return Response(
             json.dumps({"error": "Missing process id (prg_file) parameter"}),
             status=400,
-            mimetype='application/json'
+            mimetype="application/json",
         )

     # Use the queue manager's cancellation method.
     result = download_queue_manager.cancel_task(prg_file)
     status_code = 200 if result.get("status") == "cancelled" else 404

-    return Response(
-        json.dumps(result),
-        status=status_code,
-        mimetype='application/json'
-    )
+    return Response(json.dumps(result), status=status_code, mimetype="application/json")

-@album_bp.route('/info', methods=['GET'])
+
+@album_bp.route("/info", methods=["GET"])
 def get_album_info():
     """
     Retrieve Spotify album metadata given a Spotify album ID.
     Expects a query parameter 'id' that contains the Spotify album ID.
     """
-    spotify_id = request.args.get('id')
+    spotify_id = request.args.get("id")

     if not spotify_id:
         return Response(
             json.dumps({"error": "Missing parameter: id"}),
             status=400,
-            mimetype='application/json'
+            mimetype="application/json",
         )

     try:
         # Import and use the get_spotify_info function from the utility module.
         from routes.utils.get_info import get_spotify_info

         album_info = get_spotify_info(spotify_id, "album")
-        return Response(
-            json.dumps(album_info),
-            status=200,
-            mimetype='application/json'
-        )
+        return Response(json.dumps(album_info), status=200, mimetype="application/json")
     except Exception as e:
-        error_data = {
-            "error": str(e),
-            "traceback": traceback.format_exc()
-        }
-        return Response(
-            json.dumps(error_data),
-            status=500,
-            mimetype='application/json'
-        )
+        error_data = {"error": str(e), "traceback": traceback.format_exc()}
+        return Response(json.dumps(error_data), status=500, mimetype="application/json")
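Reviewer note: nearly every branch in this file builds Response(json.dumps(...), status=..., mimetype="application/json") by hand. A possible follow-up, shown here as a hypothetical helper that is not in the codebase:

# Hypothetical helper (assumption: not part of this diff) that would collapse
# the repeated JSON-response construction into one call.
import json

from flask import Response


def json_response(payload: dict, status: int = 200) -> Response:
    # Centralizes serialization and the mimetype so handlers stay one-liners.
    return Response(json.dumps(payload), status=status, mimetype="application/json")


# Usage, mirroring the error branch above:
# return json_response({"error": "Missing parameter: id"}, status=400)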
363
routes/artist.py
@@ -1,13 +1,10 @@
 #!/usr/bin/env python3
 """
 Artist endpoint blueprint.
 """

 from flask import Blueprint, Response, request, jsonify
 import json
-import os
 import traceback
 from routes.utils.celery_queue_manager import download_queue_manager
 from routes.utils.artist import download_artist_albums

 # Imports for merged watch functionality
@@ -20,22 +17,23 @@ from routes.utils.watch.db import (
     get_watched_artists,
     add_specific_albums_to_artist_table,
     remove_specific_albums_from_artist_table,
-    is_album_in_artist_db
+    is_album_in_artist_db,
 )
 from routes.utils.watch.manager import check_watched_artists, get_watch_config
 from routes.utils.get_info import get_spotify_info

-artist_bp = Blueprint('artist', __name__, url_prefix='/api/artist')
+artist_bp = Blueprint("artist", __name__, url_prefix="/api/artist")

 # Existing log_json can be used, or a logger instance.
 # Let's initialize a logger for consistency with merged code.
 logger = logging.getLogger(__name__)

+
 def log_json(message_dict):
     print(json.dumps(message_dict))


-@artist_bp.route('/download/<artist_id>', methods=['GET'])
+@artist_bp.route("/download/<artist_id>", methods=["GET"])
 def handle_artist_download(artist_id):
     """
     Enqueues album download tasks for the given artist.
@@ -46,14 +44,14 @@ def handle_artist_download(artist_id):
     url = f"https://open.spotify.com/artist/{artist_id}"

     # Retrieve essential parameters from the request.
-    album_type = request.args.get('album_type', "album,single,compilation")
+    album_type = request.args.get("album_type", "album,single,compilation")

     # Validate required parameters
     if not url:  # This check is mostly for safety, as url is constructed
         return Response(
             json.dumps({"error": "Missing required parameter: url"}),
             status=400,
-            mimetype='application/json'
+            mimetype="application/json",
         )

     try:
@@ -62,39 +60,41 @@ def handle_artist_download(artist_id):

         # Delegate to the download_artist_albums function which will handle album filtering
         successfully_queued_albums, duplicate_albums = download_artist_albums(
-            url=url,
-            album_type=album_type,
-            request_args=request.args.to_dict()
+            url=url, album_type=album_type, request_args=request.args.to_dict()
         )

         # Return the list of album task IDs.
         response_data = {
             "status": "complete",
             "message": f"Artist discography processing initiated. {len(successfully_queued_albums)} albums queued.",
-            "queued_albums": successfully_queued_albums
+            "queued_albums": successfully_queued_albums,
         }
         if duplicate_albums:
             response_data["duplicate_albums"] = duplicate_albums
-            response_data["message"] += f" {len(duplicate_albums)} albums were already in progress or queued."
+            response_data["message"] += (
+                f" {len(duplicate_albums)} albums were already in progress or queued."
+            )

         return Response(
             json.dumps(response_data),
             status=202,  # Still 202 Accepted as some operations may have succeeded
-            mimetype='application/json'
+            mimetype="application/json",
         )
     except Exception as e:
         return Response(
-            json.dumps({
-                "status": "error",
-                "message": str(e),
-                "traceback": traceback.format_exc()
-            }),
+            json.dumps(
+                {
+                    "status": "error",
+                    "message": str(e),
+                    "traceback": traceback.format_exc(),
+                }
+            ),
             status=500,
-            mimetype='application/json'
+            mimetype="application/json",
         )


-@artist_bp.route('/download/cancel', methods=['GET'])
+@artist_bp.route("/download/cancel", methods=["GET"])
 def cancel_artist_download():
     """
     Cancelling an artist download is not supported since the endpoint only enqueues album tasks.
@@ -103,23 +103,23 @@ def cancel_artist_download():
     return Response(
         json.dumps({"error": "Artist download cancellation is not supported."}),
         status=400,
-        mimetype='application/json'
+        mimetype="application/json",
     )


-@artist_bp.route('/info', methods=['GET'])
+@artist_bp.route("/info", methods=["GET"])
 def get_artist_info():
     """
     Retrieves Spotify artist metadata given a Spotify artist ID.
     Expects a query parameter 'id' with the Spotify artist ID.
     """
-    spotify_id = request.args.get('id')
+    spotify_id = request.args.get("id")

     if not spotify_id:
         return Response(
             json.dumps({"error": "Missing parameter: id"}),
             status=400,
-            mimetype='application/json'
+            mimetype="application/json",
         )

     try:
@@ -127,36 +127,37 @@ def get_artist_info():

         # If artist_info is successfully fetched (it contains album items),
         # check if the artist is watched and augment album items with is_locally_known status
-        if artist_info and artist_info.get('items'):
-            watched_artist_details = get_watched_artist(spotify_id)  # spotify_id is the artist ID
+        if artist_info and artist_info.get("items"):
+            watched_artist_details = get_watched_artist(
+                spotify_id
+            )  # spotify_id is the artist ID
             if watched_artist_details:  # Artist is being watched
-                for album_item in artist_info['items']:
-                    if album_item and album_item.get('id'):
-                        album_id = album_item['id']
-                        album_item['is_locally_known'] = is_album_in_artist_db(spotify_id, album_id)
+                for album_item in artist_info["items"]:
+                    if album_item and album_item.get("id"):
+                        album_id = album_item["id"]
+                        album_item["is_locally_known"] = is_album_in_artist_db(
+                            spotify_id, album_id
+                        )
                     elif album_item:  # Album object exists but no ID
-                        album_item['is_locally_known'] = False
+                        album_item["is_locally_known"] = False
         # If not watched, or no albums, is_locally_known will not be added.
         # Frontend should handle absence of this key as false.

         return Response(
-            json.dumps(artist_info),
-            status=200,
-            mimetype='application/json'
+            json.dumps(artist_info), status=200, mimetype="application/json"
         )
     except Exception as e:
         return Response(
-            json.dumps({
-                "error": str(e),
-                "traceback": traceback.format_exc()
-            }),
+            json.dumps({"error": str(e), "traceback": traceback.format_exc()}),
             status=500,
-            mimetype='application/json'
+            mimetype="application/json",
         )


 # --- Merged Artist Watch Routes ---

-@artist_bp.route('/watch/<string:artist_spotify_id>', methods=['PUT'])
+
+@artist_bp.route("/watch/<string:artist_spotify_id>", methods=["PUT"])
 def add_artist_to_watchlist(artist_spotify_id):
     """Adds an artist to the watchlist."""
     watch_config = get_watch_config()
@@ -166,31 +167,60 @@ def add_artist_to_watchlist(artist_spotify_id):
     logger.info(f"Attempting to add artist {artist_spotify_id} to watchlist.")
     try:
         if get_watched_artist(artist_spotify_id):
-            return jsonify({"message": f"Artist {artist_spotify_id} is already being watched."}), 200
+            return jsonify(
+                {"message": f"Artist {artist_spotify_id} is already being watched."}
+            ), 200

         # This call returns an album list-like structure based on logs
-        artist_album_list_data = get_spotify_info(artist_spotify_id, "artist_discography")
+        artist_album_list_data = get_spotify_info(
+            artist_spotify_id, "artist_discography"
+        )

         # Check if we got any data and if it has items
-        if not artist_album_list_data or not isinstance(artist_album_list_data.get('items'), list):
-            logger.error(f"Could not fetch album list details for artist {artist_spotify_id} from Spotify using get_spotify_info('artist_discography'). Data: {artist_album_list_data}")
-            return jsonify({"error": f"Could not fetch sufficient details for artist {artist_spotify_id} to initiate watch."}), 404
+        if not artist_album_list_data or not isinstance(
+            artist_album_list_data.get("items"), list
+        ):
+            logger.error(
+                f"Could not fetch album list details for artist {artist_spotify_id} from Spotify using get_spotify_info('artist_discography'). Data: {artist_album_list_data}"
+            )
+            return jsonify(
+                {
+                    "error": f"Could not fetch sufficient details for artist {artist_spotify_id} to initiate watch."
                }
+            ), 404

         # Attempt to extract artist name and verify ID
         # The actual artist name might be consistently found in the items, if they exist
         artist_name_from_albums = "Unknown Artist"  # Default
-        if artist_album_list_data['items']:
-            first_album = artist_album_list_data['items'][0]
-            if first_album and isinstance(first_album.get('artists'), list) and first_album['artists']:
+        if artist_album_list_data["items"]:
+            first_album = artist_album_list_data["items"][0]
+            if (
+                first_album
+                and isinstance(first_album.get("artists"), list)
+                and first_album["artists"]
+            ):
                 # Find the artist in the list that matches the artist_spotify_id
-                found_artist = next((art for art in first_album['artists'] if art.get('id') == artist_spotify_id), None)
-                if found_artist and found_artist.get('name'):
-                    artist_name_from_albums = found_artist['name']
-                elif first_album['artists'][0].get('name'):  # Fallback to first artist if specific match not found or no ID
-                    artist_name_from_albums = first_album['artists'][0]['name']
-                    logger.warning(f"Could not find exact artist ID {artist_spotify_id} in first album's artists list. Using name '{artist_name_from_albums}'.")
+                found_artist = next(
+                    (
+                        art
+                        for art in first_album["artists"]
+                        if art.get("id") == artist_spotify_id
+                    ),
+                    None,
+                )
+                if found_artist and found_artist.get("name"):
+                    artist_name_from_albums = found_artist["name"]
+                elif first_album["artists"][0].get(
+                    "name"
+                ):  # Fallback to first artist if specific match not found or no ID
+                    artist_name_from_albums = first_album["artists"][0]["name"]
+                    logger.warning(
+                        f"Could not find exact artist ID {artist_spotify_id} in first album's artists list. Using name '{artist_name_from_albums}'."
+                    )
         else:
-            logger.warning(f"No album items found for artist {artist_spotify_id} to extract name. Using default.")
+            logger.warning(
+                f"No album items found for artist {artist_spotify_id} to extract name. Using default."
+            )

         # Construct the artist_data object expected by add_artist_db
         # We use the provided artist_spotify_id as the primary ID.
@@ -198,20 +228,29 @@ def add_artist_to_watchlist(artist_spotify_id):
             "id": artist_spotify_id,  # This is the crucial part
             "name": artist_name_from_albums,
             "albums": {  # Mimic structure if add_artist_db expects it for total_albums
-                "total": artist_album_list_data.get('total', 0)
-            }
+                "total": artist_album_list_data.get("total", 0)
+            },
             # Add any other fields add_artist_db might expect from a true artist object if necessary
         }

         add_artist_db(artist_data_for_db)

-        logger.info(f"Artist {artist_spotify_id} ('{artist_name_from_albums}') added to watchlist. Their albums will be processed by the watch manager.")
-        return jsonify({"message": f"Artist {artist_spotify_id} added to watchlist. Albums will be processed shortly."}), 201
+        logger.info(
+            f"Artist {artist_spotify_id} ('{artist_name_from_albums}') added to watchlist. Their albums will be processed by the watch manager."
+        )
+        return jsonify(
+            {
+                "message": f"Artist {artist_spotify_id} added to watchlist. Albums will be processed shortly."
+            }
+        ), 201
     except Exception as e:
-        logger.error(f"Error adding artist {artist_spotify_id} to watchlist: {e}", exc_info=True)
+        logger.error(
+            f"Error adding artist {artist_spotify_id} to watchlist: {e}", exc_info=True
+        )
        return jsonify({"error": f"Could not add artist to watchlist: {str(e)}"}), 500

-@artist_bp.route('/watch/<string:artist_spotify_id>/status', methods=['GET'])
+
+@artist_bp.route("/watch/<string:artist_spotify_id>/status", methods=["GET"])
 def get_artist_watch_status(artist_spotify_id):
     """Checks if a specific artist is being watched."""
     logger.info(f"Checking watch status for artist {artist_spotify_id}.")
@@ -222,10 +261,14 @@ def get_artist_watch_status(artist_spotify_id):
         else:
             return jsonify({"is_watched": False}), 200
     except Exception as e:
-        logger.error(f"Error checking watch status for artist {artist_spotify_id}: {e}", exc_info=True)
+        logger.error(
+            f"Error checking watch status for artist {artist_spotify_id}: {e}",
+            exc_info=True,
+        )
         return jsonify({"error": f"Could not check watch status: {str(e)}"}), 500

-@artist_bp.route('/watch/<string:artist_spotify_id>', methods=['DELETE'])
+
+@artist_bp.route("/watch/<string:artist_spotify_id>", methods=["DELETE"])
 def remove_artist_from_watchlist(artist_spotify_id):
     """Removes an artist from the watchlist."""
     watch_config = get_watch_config()
@@ -235,16 +278,26 @@ def remove_artist_from_watchlist(artist_spotify_id):
     logger.info(f"Attempting to remove artist {artist_spotify_id} from watchlist.")
     try:
         if not get_watched_artist(artist_spotify_id):
-            return jsonify({"error": f"Artist {artist_spotify_id} not found in watchlist."}), 404
+            return jsonify(
+                {"error": f"Artist {artist_spotify_id} not found in watchlist."}
+            ), 404

         remove_artist_db(artist_spotify_id)
         logger.info(f"Artist {artist_spotify_id} removed from watchlist successfully.")
-        return jsonify({"message": f"Artist {artist_spotify_id} removed from watchlist."}), 200
+        return jsonify(
+            {"message": f"Artist {artist_spotify_id} removed from watchlist."}
+        ), 200
     except Exception as e:
-        logger.error(f"Error removing artist {artist_spotify_id} from watchlist: {e}", exc_info=True)
-        return jsonify({"error": f"Could not remove artist from watchlist: {str(e)}"}), 500
+        logger.error(
+            f"Error removing artist {artist_spotify_id} from watchlist: {e}",
+            exc_info=True,
+        )
+        return jsonify(
+            {"error": f"Could not remove artist from watchlist: {str(e)}"}
+        ), 500

-@artist_bp.route('/watch/list', methods=['GET'])
+
+@artist_bp.route("/watch/list", methods=["GET"])
 def list_watched_artists_endpoint():
     """Lists all artists currently in the watchlist."""
     try:
@@ -254,101 +307,201 @@ def list_watched_artists_endpoint():
         logger.error(f"Error listing watched artists: {e}", exc_info=True)
         return jsonify({"error": f"Could not list watched artists: {str(e)}"}), 500

-@artist_bp.route('/watch/trigger_check', methods=['POST'])
+
+@artist_bp.route("/watch/trigger_check", methods=["POST"])
 def trigger_artist_check_endpoint():
     """Manually triggers the artist checking mechanism for all watched artists."""
     watch_config = get_watch_config()
     if not watch_config.get("enabled", False):
-        return jsonify({"error": "Watch feature is currently disabled globally. Cannot trigger check."}), 403
+        return jsonify(
+            {
+                "error": "Watch feature is currently disabled globally. Cannot trigger check."
+            }
+        ), 403

     logger.info("Manual trigger for artist check received for all artists.")
     try:
         thread = threading.Thread(target=check_watched_artists, args=(None,))
         thread.start()
-        return jsonify({"message": "Artist check triggered successfully in the background for all artists."}), 202
+        return jsonify(
+            {
+                "message": "Artist check triggered successfully in the background for all artists."
+            }
+        ), 202
     except Exception as e:
-        logger.error(f"Error manually triggering artist check for all: {e}", exc_info=True)
-        return jsonify({"error": f"Could not trigger artist check for all: {str(e)}"}), 500
+        logger.error(
+            f"Error manually triggering artist check for all: {e}", exc_info=True
+        )
+        return jsonify(
+            {"error": f"Could not trigger artist check for all: {str(e)}"}
+        ), 500

-@artist_bp.route('/watch/trigger_check/<string:artist_spotify_id>', methods=['POST'])
+
+@artist_bp.route("/watch/trigger_check/<string:artist_spotify_id>", methods=["POST"])
 def trigger_specific_artist_check_endpoint(artist_spotify_id: str):
     """Manually triggers the artist checking mechanism for a specific artist."""
     watch_config = get_watch_config()
     if not watch_config.get("enabled", False):
-        return jsonify({"error": "Watch feature is currently disabled globally. Cannot trigger check."}), 403
+        return jsonify(
+            {
+                "error": "Watch feature is currently disabled globally. Cannot trigger check."
+            }
+        ), 403

-    logger.info(f"Manual trigger for specific artist check received for ID: {artist_spotify_id}")
+    logger.info(
+        f"Manual trigger for specific artist check received for ID: {artist_spotify_id}"
+    )
     try:
         watched_artist = get_watched_artist(artist_spotify_id)
         if not watched_artist:
-            logger.warning(f"Trigger specific check: Artist ID {artist_spotify_id} not found in watchlist.")
-            return jsonify({"error": f"Artist {artist_spotify_id} is not in the watchlist. Add it first."}), 404
+            logger.warning(
+                f"Trigger specific check: Artist ID {artist_spotify_id} not found in watchlist."
+            )
+            return jsonify(
+                {
+                    "error": f"Artist {artist_spotify_id} is not in the watchlist. Add it first."
+                }
+            ), 404

-        thread = threading.Thread(target=check_watched_artists, args=(artist_spotify_id,))
+        thread = threading.Thread(
+            target=check_watched_artists, args=(artist_spotify_id,)
+        )
         thread.start()
-        logger.info(f"Artist check triggered in background for specific artist ID: {artist_spotify_id}")
-        return jsonify({"message": f"Artist check triggered successfully in the background for {artist_spotify_id}."}), 202
+        logger.info(
+            f"Artist check triggered in background for specific artist ID: {artist_spotify_id}"
+        )
+        return jsonify(
+            {
+                "message": f"Artist check triggered successfully in the background for {artist_spotify_id}."
+            }
+        ), 202
     except Exception as e:
-        logger.error(f"Error manually triggering specific artist check for {artist_spotify_id}: {e}", exc_info=True)
-        return jsonify({"error": f"Could not trigger artist check for {artist_spotify_id}: {str(e)}"}), 500
+        logger.error(
+            f"Error manually triggering specific artist check for {artist_spotify_id}: {e}",
+            exc_info=True,
+        )
+        return jsonify(
+            {
+                "error": f"Could not trigger artist check for {artist_spotify_id}: {str(e)}"
+            }
+        ), 500

-@artist_bp.route('/watch/<string:artist_spotify_id>/albums', methods=['POST'])
+
+@artist_bp.route("/watch/<string:artist_spotify_id>/albums", methods=["POST"])
 def mark_albums_as_known_for_artist(artist_spotify_id):
     """Fetches details for given album IDs and adds/updates them in the artist's local DB table."""
     watch_config = get_watch_config()
     if not watch_config.get("enabled", False):
-        return jsonify({"error": "Watch feature is currently disabled globally. Cannot mark albums."}), 403
+        return jsonify(
+            {
+                "error": "Watch feature is currently disabled globally. Cannot mark albums."
+            }
+        ), 403

     logger.info(f"Attempting to mark albums as known for artist {artist_spotify_id}.")
     try:
         album_ids = request.json
-        if not isinstance(album_ids, list) or not all(isinstance(aid, str) for aid in album_ids):
-            return jsonify({"error": "Invalid request body. Expecting a JSON array of album Spotify IDs."}), 400
+        if not isinstance(album_ids, list) or not all(
+            isinstance(aid, str) for aid in album_ids
+        ):
+            return jsonify(
+                {
+                    "error": "Invalid request body. Expecting a JSON array of album Spotify IDs."
+                }
+            ), 400

         if not get_watched_artist(artist_spotify_id):
-            return jsonify({"error": f"Artist {artist_spotify_id} is not being watched."}), 404
+            return jsonify(
+                {"error": f"Artist {artist_spotify_id} is not being watched."}
+            ), 404

         fetched_albums_details = []
         for album_id in album_ids:
             try:
                 # We need full album details. get_spotify_info with type "album" should provide this.
                 album_detail = get_spotify_info(album_id, "album")
-                if album_detail and album_detail.get('id'):
+                if album_detail and album_detail.get("id"):
                     fetched_albums_details.append(album_detail)
                 else:
-                    logger.warning(f"Could not fetch details for album {album_id} when marking as known for artist {artist_spotify_id}.")
+                    logger.warning(
+                        f"Could not fetch details for album {album_id} when marking as known for artist {artist_spotify_id}."
+                    )
             except Exception as e:
-                logger.error(f"Failed to fetch Spotify details for album {album_id}: {e}")
+                logger.error(
+                    f"Failed to fetch Spotify details for album {album_id}: {e}"
+                )

         if not fetched_albums_details:
-            return jsonify({"message": "No valid album details could be fetched to mark as known.", "processed_count": 0}), 200
+            return jsonify(
+                {
+                    "message": "No valid album details could be fetched to mark as known.",
+                    "processed_count": 0,
+                }
+            ), 200

-        processed_count = add_specific_albums_to_artist_table(artist_spotify_id, fetched_albums_details)
-        logger.info(f"Successfully marked/updated {processed_count} albums as known for artist {artist_spotify_id}.")
-        return jsonify({"message": f"Successfully processed {processed_count} albums for artist {artist_spotify_id}."}), 200
+        processed_count = add_specific_albums_to_artist_table(
+            artist_spotify_id, fetched_albums_details
+        )
+        logger.info(
+            f"Successfully marked/updated {processed_count} albums as known for artist {artist_spotify_id}."
+        )
+        return jsonify(
+            {
+                "message": f"Successfully processed {processed_count} albums for artist {artist_spotify_id}."
+            }
+        ), 200
     except Exception as e:
-        logger.error(f"Error marking albums as known for artist {artist_spotify_id}: {e}", exc_info=True)
+        logger.error(
+            f"Error marking albums as known for artist {artist_spotify_id}: {e}",
+            exc_info=True,
+        )
         return jsonify({"error": f"Could not mark albums as known: {str(e)}"}), 500

-@artist_bp.route('/watch/<string:artist_spotify_id>/albums', methods=['DELETE'])
+
+@artist_bp.route("/watch/<string:artist_spotify_id>/albums", methods=["DELETE"])
 def mark_albums_as_missing_locally_for_artist(artist_spotify_id):
     """Removes specified albums from the artist's local DB table."""
     watch_config = get_watch_config()
     if not watch_config.get("enabled", False):
-        return jsonify({"error": "Watch feature is currently disabled globally. Cannot mark albums."}), 403
+        return jsonify(
+            {
+                "error": "Watch feature is currently disabled globally. Cannot mark albums."
+            }
+        ), 403

-    logger.info(f"Attempting to mark albums as missing (delete locally) for artist {artist_spotify_id}.")
+    logger.info(
+        f"Attempting to mark albums as missing (delete locally) for artist {artist_spotify_id}."
+    )
     try:
         album_ids = request.json
-        if not isinstance(album_ids, list) or not all(isinstance(aid, str) for aid in album_ids):
-            return jsonify({"error": "Invalid request body. Expecting a JSON array of album Spotify IDs."}), 400
+        if not isinstance(album_ids, list) or not all(
+            isinstance(aid, str) for aid in album_ids
+        ):
+            return jsonify(
+                {
+                    "error": "Invalid request body. Expecting a JSON array of album Spotify IDs."
+                }
+            ), 400

         if not get_watched_artist(artist_spotify_id):
-            return jsonify({"error": f"Artist {artist_spotify_id} is not being watched."}), 404
+            return jsonify(
+                {"error": f"Artist {artist_spotify_id} is not being watched."}
+            ), 404

-        deleted_count = remove_specific_albums_from_artist_table(artist_spotify_id, album_ids)
-        logger.info(f"Successfully removed {deleted_count} albums locally for artist {artist_spotify_id}.")
-        return jsonify({"message": f"Successfully removed {deleted_count} albums locally for artist {artist_spotify_id}."}), 200
+        deleted_count = remove_specific_albums_from_artist_table(
+            artist_spotify_id, album_ids
+        )
+        logger.info(
+            f"Successfully removed {deleted_count} albums locally for artist {artist_spotify_id}."
+        )
+        return jsonify(
+            {
+                "message": f"Successfully removed {deleted_count} albums locally for artist {artist_spotify_id}."
+            }
+        ), 200
     except Exception as e:
-        logger.error(f"Error marking albums as missing (deleting locally) for artist {artist_spotify_id}: {e}", exc_info=True)
+        logger.error(
+            f"Error marking albums as missing (deleting locally) for artist {artist_spotify_id}: {e}",
+            exc_info=True,
+        )
         return jsonify({"error": f"Could not mark albums as missing: {str(e)}"}), 500
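Reviewer note: the trigger_check endpoints spawn a thread and answer 202 immediately. A minimal sketch of that fire-and-forget pattern; check_watched here stands in for the real check_watched_artists, and daemon=True is an assumption of the sketch (the diff does not set it), meaning the thread would not block interpreter shutdown.

# Illustrative sketch, not project code.
import threading
import time


def check_watched(artist_id: str | None) -> None:
    time.sleep(1)  # stand-in for the real per-artist check
    print(f"checked {artist_id or 'all artists'}")


thread = threading.Thread(target=check_watched, args=(None,), daemon=True)
thread.start()  # returns at once; the endpoint would now respond 202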
121
routes/config.py
@@ -1,65 +1,55 @@
 from flask import Blueprint, jsonify, request
 import json
 from pathlib import Path
 import logging
 import threading
 import time
 import os
+from typing import Any

 # Import the centralized config getters that handle file creation and defaults
-from routes.utils.celery_config import get_config_params as get_main_config_params, DEFAULT_MAIN_CONFIG, CONFIG_FILE_PATH as MAIN_CONFIG_FILE_PATH
-from routes.utils.watch.manager import get_watch_config as get_watch_manager_config, DEFAULT_WATCH_CONFIG, CONFIG_FILE_PATH as WATCH_CONFIG_FILE_PATH
+from routes.utils.celery_config import (
+    get_config_params as get_main_config_params,
+    DEFAULT_MAIN_CONFIG,
+    CONFIG_FILE_PATH as MAIN_CONFIG_FILE_PATH,
+)
+from routes.utils.watch.manager import (
+    get_watch_config as get_watch_manager_config,
+    DEFAULT_WATCH_CONFIG,
+    CONFIG_FILE_PATH as WATCH_CONFIG_FILE_PATH,
+)

 logger = logging.getLogger(__name__)

-config_bp = Blueprint('config', __name__)
+config_bp = Blueprint("config", __name__)

 # Path to main config file (consistent with celery_config.py)
 # CONFIG_PATH = Path('./data/config/main.json') # Defined as MAIN_CONFIG_FILE_PATH from import
 # Path to watch config file (consistent with watch/manager.py)
 # WATCH_CONFIG_PATH = Path('./data/config/watch.json') # Defined as WATCH_CONFIG_FILE_PATH from import

 # Flag for config change notifications
 config_changed = False
-last_config = {}
+last_config: dict[str, Any] = {}

 # Define parameters that should trigger notification when changed
 NOTIFY_PARAMETERS = [
-    'maxConcurrentDownloads',
-    'service',
-    'fallback',
-    'spotifyQuality',
-    'deezerQuality'
+    "maxConcurrentDownloads",
+    "service",
+    "fallback",
+    "spotifyQuality",
+    "deezerQuality",
 ]


 # Helper to get main config (uses the one from celery_config)
 def get_config():
     """Retrieves the main configuration, creating it with defaults if necessary."""
     return get_main_config_params()


 # Helper to save main config
 def save_config(config_data):
     """Saves the main configuration data to main.json."""
     try:
         MAIN_CONFIG_FILE_PATH.parent.mkdir(parents=True, exist_ok=True)
         # Ensure all default keys are present before saving, merging if necessary
         current_defaults = DEFAULT_MAIN_CONFIG.copy()
         # Overlay provided data on defaults to ensure all keys are there.
         # This might not be ideal if user explicitly wants to remove a key,
         # but for this setup, ensuring defaults is safer.
         # A better approach for full PUT might be to replace entirely,
         # but for ensuring defaults, this is okay.
         # Let's assume config_data is what the user intends fully.
         # We'll rely on get_config_params to have already populated defaults if the file was new.
         # When saving, we should just save what's given, after ensuring it has necessary structure.

         # Merge with defaults to ensure all keys are present
         # This ensures that if a user POSTs partial data, it's merged with existing/default structure

         # Load current or default config
         existing_config = {}
         if MAIN_CONFIG_FILE_PATH.exists():
-            with open(MAIN_CONFIG_FILE_PATH, 'r') as f_read:
+            with open(MAIN_CONFIG_FILE_PATH, "r") as f_read:
                 existing_config = json.load(f_read)
         else:  # Should be rare if get_config_params was called
             existing_config = DEFAULT_MAIN_CONFIG.copy()
@@ -73,7 +63,7 @@ def save_config(config_data):
             if default_key not in existing_config:
                 existing_config[default_key] = default_value

-        with open(MAIN_CONFIG_FILE_PATH, 'w') as f:
+        with open(MAIN_CONFIG_FILE_PATH, "w") as f:
             json.dump(existing_config, f, indent=4)
         logger.info(f"Main configuration saved to {MAIN_CONFIG_FILE_PATH}")
         return True, None
@@ -81,11 +71,13 @@ def save_config(config_data):
         logger.error(f"Error saving main configuration: {e}", exc_info=True)
         return False, str(e)

+
 # Helper to get watch config (uses the one from watch/manager.py)
 def get_watch_config_http():  # Renamed to avoid conflict with the imported get_watch_config
     """Retrieves the watch configuration, creating it with defaults if necessary."""
     return get_watch_manager_config()

+
 # Helper to save watch config
 def save_watch_config_http(watch_config_data):  # Renamed
     """Saves the watch configuration data to watch.json."""
@@ -95,7 +87,7 @@ def save_watch_config_http(watch_config_data):  # Renamed
         # Similar logic to save_config: merge with defaults/existing
         existing_config = {}
         if WATCH_CONFIG_FILE_PATH.exists():
-            with open(WATCH_CONFIG_FILE_PATH, 'r') as f_read:
+            with open(WATCH_CONFIG_FILE_PATH, "r") as f_read:
                 existing_config = json.load(f_read)
         else:  # Should be rare if get_watch_manager_config was called
             existing_config = DEFAULT_WATCH_CONFIG.copy()
@@ -107,7 +99,7 @@ def save_watch_config_http(watch_config_data):  # Renamed
             if default_key not in existing_config:
                 existing_config[default_key] = default_value

-        with open(WATCH_CONFIG_FILE_PATH, 'w') as f:
+        with open(WATCH_CONFIG_FILE_PATH, "w") as f:
             json.dump(existing_config, f, indent=4)
         logger.info(f"Watch configuration saved to {WATCH_CONFIG_FILE_PATH}")
         return True, None
@@ -115,7 +107,8 @@ def save_watch_config_http(watch_config_data):  # Renamed
         logger.error(f"Error saving watch configuration: {e}", exc_info=True)
         return False, str(e)

-@config_bp.route('/config', methods=['GET'])
+
+@config_bp.route("/config", methods=["GET"])
 def handle_config():
     """Handles GET requests for the main configuration."""
     try:
@@ -123,9 +116,12 @@ def handle_config():
         return jsonify(config)
     except Exception as e:
         logger.error(f"Error in GET /config: {e}", exc_info=True)
-        return jsonify({"error": "Failed to retrieve configuration", "details": str(e)}), 500
+        return jsonify(
+            {"error": "Failed to retrieve configuration", "details": str(e)}
+        ), 500

-@config_bp.route('/config', methods=['POST', 'PUT'])
+
+@config_bp.route("/config", methods=["POST", "PUT"])
 def update_config():
     """Handles POST/PUT requests to update the main configuration."""
     try:
@@ -133,12 +129,9 @@ def update_config():
         if not isinstance(new_config, dict):
             return jsonify({"error": "Invalid config format"}), 400

-        # Get existing config to preserve environment-controlled values
-        existing_config = get_config() or {}
-
         # Preserve the explicitFilter setting from environment
-        explicit_filter_env = os.environ.get('EXPLICIT_FILTER', 'false').lower()
-        new_config['explicitFilter'] = explicit_filter_env in ('true', '1', 'yes', 'on')
+        explicit_filter_env = os.environ.get("EXPLICIT_FILTER", "false").lower()
+        new_config["explicitFilter"] = explicit_filter_env in ("true", "1", "yes", "on")

         success, error_msg = save_config(new_config)
         if success:
@@ -147,33 +140,42 @@ def update_config():
             if updated_config_values is None:
                 # This case should ideally not be reached if save_config succeeded
                 # and get_config handles errors by returning a default or None.
-                return jsonify({"error": "Failed to retrieve configuration after saving"}), 500
+                return jsonify(
+                    {"error": "Failed to retrieve configuration after saving"}
+                ), 500

             return jsonify(updated_config_values)
         else:
-            return jsonify({"error": "Failed to update configuration", "details": error_msg}), 500
+            return jsonify(
+                {"error": "Failed to update configuration", "details": error_msg}
+            ), 500
     except json.JSONDecodeError:
         return jsonify({"error": "Invalid JSON data"}), 400
     except Exception as e:
         logger.error(f"Error in POST/PUT /config: {e}", exc_info=True)
-        return jsonify({"error": "Failed to update configuration", "details": str(e)}), 500
+        return jsonify(
+            {"error": "Failed to update configuration", "details": str(e)}
+        ), 500

-@config_bp.route('/config/check', methods=['GET'])
+
+@config_bp.route("/config/check", methods=["GET"])
 def check_config_changes():
     # This endpoint seems more related to dynamically checking if config changed
     # on disk, which might not be necessary if settings are applied on restart
     # or by a dedicated manager. For now, just return current config.
     try:
         config = get_config()
-        return jsonify({
-            "message": "Current configuration retrieved.",
-            "config": config
-        })
+        return jsonify(
+            {"message": "Current configuration retrieved.", "config": config}
+        )
     except Exception as e:
         logger.error(f"Error in GET /config/check: {e}", exc_info=True)
-        return jsonify({"error": "Failed to check configuration", "details": str(e)}), 500
+        return jsonify(
+            {"error": "Failed to check configuration", "details": str(e)}
+        ), 500

-@config_bp.route('/config/watch', methods=['GET'])
+
+@config_bp.route("/config/watch", methods=["GET"])
 def handle_watch_config():
     """Handles GET requests for the watch configuration."""
     try:
@@ -181,9 +183,12 @@ def handle_watch_config():
         return jsonify(watch_config)
     except Exception as e:
         logger.error(f"Error in GET /config/watch: {e}", exc_info=True)
-        return jsonify({"error": "Failed to retrieve watch configuration", "details": str(e)}), 500
+        return jsonify(
+            {"error": "Failed to retrieve watch configuration", "details": str(e)}
+        ), 500

-@config_bp.route('/config/watch', methods=['POST', 'PUT'])
+
+@config_bp.route("/config/watch", methods=["POST", "PUT"])
 def update_watch_config():
     """Handles POST/PUT requests to update the watch configuration."""
     try:
@@ -195,9 +200,13 @@ def update_watch_config():
         if success:
             return jsonify({"message": "Watch configuration updated successfully"}), 200
         else:
-            return jsonify({"error": "Failed to update watch configuration", "details": error_msg}), 500
+            return jsonify(
+                {"error": "Failed to update watch configuration", "details": error_msg}
+            ), 500
     except json.JSONDecodeError:
         return jsonify({"error": "Invalid JSON data for watch config"}), 400
     except Exception as e:
         logger.error(f"Error in POST/PUT /config/watch: {e}", exc_info=True)
-        return jsonify({"error": "Failed to update watch configuration", "details": str(e)}), 500
+        return jsonify(
+            {"error": "Failed to update watch configuration", "details": str(e)}
+        ), 500
|
||||
|
||||
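For orientation, a minimal client sketch for the config endpoints above. The /api mount prefix, host, and port are assumptions not visible in this diff; the POST path /config is inferred from the /config/check and /config/watch routes shown here, and someSetting is a hypothetical key used only for illustration.

import requests

BASE = "http://localhost:7171/api"  # assumed mount prefix, host, and port

# Read the current configuration via the /config/check route defined above.
current = requests.get(f"{BASE}/config/check").json()["config"]

# Send the configuration back with a change. Note that the server re-applies
# explicitFilter from the EXPLICIT_FILTER environment variable regardless of
# what the client submits.
current["someSetting"] = "new-value"  # hypothetical key, for illustration only
resp = requests.post(f"{BASE}/config", json=current)
print(resp.status_code, resp.json())
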
@@ -8,59 +8,75 @@ from routes.utils.credentials import (
init_credentials_db,
# Import new utility functions for global Spotify API creds
_get_global_spotify_api_creds,
save_global_spotify_api_creds
save_global_spotify_api_creds,
)
from pathlib import Path
import logging

logger = logging.getLogger(__name__)
credentials_bp = Blueprint('credentials', __name__)
credentials_bp = Blueprint("credentials", __name__)

# Initialize the database and tables when the blueprint is loaded
init_credentials_db()

@credentials_bp.route('/spotify_api_config', methods=['GET', 'PUT'])

@credentials_bp.route("/spotify_api_config", methods=["GET", "PUT"])
def handle_spotify_api_config():
"""Handles GET and PUT requests for the global Spotify API client_id and client_secret."""
try:
if request.method == 'GET':
if request.method == "GET":
client_id, client_secret = _get_global_spotify_api_creds()
if client_id is not None and client_secret is not None:
return jsonify({"client_id": client_id, "client_secret": client_secret}), 200
return jsonify(
{"client_id": client_id, "client_secret": client_secret}
), 200
else:
# If search.json exists but is empty/incomplete, or doesn't exist
return jsonify({
return jsonify(
{
"warning": "Global Spotify API credentials are not fully configured or file is missing.",
"client_id": client_id or "",
"client_secret": client_secret or ""
}), 200
"client_secret": client_secret or "",
}
), 200

elif request.method == 'PUT':
elif request.method == "PUT":
data = request.get_json()
if not data or 'client_id' not in data or 'client_secret' not in data:
return jsonify({"error": "Request body must contain 'client_id' and 'client_secret'"}), 400
if not data or "client_id" not in data or "client_secret" not in data:
return jsonify(
{
"error": "Request body must contain 'client_id' and 'client_secret'"
}
), 400

client_id = data['client_id']
client_secret = data['client_secret']
client_id = data["client_id"]
client_secret = data["client_secret"]

if not isinstance(client_id, str) or not isinstance(client_secret, str):
return jsonify({"error": "'client_id' and 'client_secret' must be strings"}), 400
return jsonify(
{"error": "'client_id' and 'client_secret' must be strings"}
), 400

if save_global_spotify_api_creds(client_id, client_secret):
return jsonify({"message": "Global Spotify API credentials updated successfully."}), 200
return jsonify(
{"message": "Global Spotify API credentials updated successfully."}
), 200
else:
return jsonify({"error": "Failed to save global Spotify API credentials."}), 500
return jsonify(
{"error": "Failed to save global Spotify API credentials."}
), 500

except Exception as e:
logger.error(f"Error in /spotify_api_config: {e}", exc_info=True)
return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500


@credentials_bp.route('/<service>', methods=['GET'])
@credentials_bp.route("/<service>", methods=["GET"])
def handle_list_credentials(service):
try:
if service not in ['spotify', 'deezer']:
return jsonify({"error": "Invalid service. Must be 'spotify' or 'deezer'"}), 400
if service not in ["spotify", "deezer"]:
return jsonify(
{"error": "Invalid service. Must be 'spotify' or 'deezer'"}
), 400
return jsonify(list_credentials(service))
except ValueError as e: # Should not happen with service check above
return jsonify({"error": str(e)}), 400
@@ -68,20 +84,23 @@ def handle_list_credentials(service):
logger.error(f"Error listing credentials for {service}: {e}", exc_info=True)
return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500

@credentials_bp.route('/<service>/<name>', methods=['GET', 'POST', 'PUT', 'DELETE'])

@credentials_bp.route("/<service>/<name>", methods=["GET", "POST", "PUT", "DELETE"])
def handle_single_credential(service, name):
try:
if service not in ['spotify', 'deezer']:
return jsonify({"error": "Invalid service. Must be 'spotify' or 'deezer'"}), 400
if service not in ["spotify", "deezer"]:
return jsonify(
{"error": "Invalid service. Must be 'spotify' or 'deezer'"}
), 400

# cred_type logic is removed for Spotify as API keys are global.
# For Deezer, it's always 'credentials' type implicitly.

if request.method == 'GET':
if request.method == "GET":
# get_credential for Spotify now only returns region and blob_file_path
return jsonify(get_credential(service, name))

elif request.method == 'POST':
elif request.method == "POST":
data = request.get_json()
if not data:
return jsonify({"error": "Request body cannot be empty."}), 400
@@ -89,21 +108,36 @@ def handle_single_credential(service, name):
# For Deezer, it expects 'arl' and 'region'
# Validation is handled within create_credential utility function
result = create_credential(service, name, data)
return jsonify({"message": f"Credential for '{name}' ({service}) created successfully.", "details": result}), 201
return jsonify(
{
"message": f"Credential for '{name}' ({service}) created successfully.",
"details": result,
}
), 201

elif request.method == 'PUT':
elif request.method == "PUT":
data = request.get_json()
if not data:
return jsonify({"error": "Request body cannot be empty."}), 400
# edit_credential for Spotify now handles updates to 'region', 'blob_content'
# For Deezer, 'arl', 'region'
result = edit_credential(service, name, data)
return jsonify({"message": f"Credential for '{name}' ({service}) updated successfully.", "details": result})
return jsonify(
{
"message": f"Credential for '{name}' ({service}) updated successfully.",
"details": result,
}
)

elif request.method == 'DELETE':
elif request.method == "DELETE":
# delete_credential for Spotify also handles deleting the blob directory
result = delete_credential(service, name)
return jsonify({"message": f"Credential for '{name}' ({service}) deleted successfully.", "details": result})
return jsonify(
{
"message": f"Credential for '{name}' ({service}) deleted successfully.",
"details": result,
}
)

except (ValueError, FileNotFoundError, FileExistsError) as e:
status_code = 400
@@ -117,14 +151,18 @@ def handle_single_credential(service, name):
logger.error(f"Server error in /<{service}>/<{name}>: {e}", exc_info=True)
return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500

# The '/search/<service>/<name>' route is now obsolete for Spotify and has been removed.

@credentials_bp.route('/all/<service>', methods=['GET'])

@credentials_bp.route("/all/<service>", methods=["GET"])
def handle_all_credentials(service):
"""Lists all credentials for a given service. For Spotify, API keys are global and not listed per account."""
try:
if service not in ['spotify', 'deezer']:
return jsonify({"error": "Invalid service. Must be 'spotify' or 'deezer'"}), 400
if service not in ["spotify", "deezer"]:
return jsonify(
{"error": "Invalid service. Must be 'spotify' or 'deezer'"}
), 400

credentials_list = []
account_names = list_credentials(service) # This lists names from DB
@@ -137,17 +175,28 @@ def handle_all_credentials(service):
# We don't add global Spotify API keys here as they are separate
credentials_list.append({"name": name, "details": account_data})
except FileNotFoundError:
logger.warning(f"Credential name '{name}' listed for service '{service}' but not found by get_credential. Skipping.")
logger.warning(
f"Credential name '{name}' listed for service '{service}' but not found by get_credential. Skipping."
)
except Exception as e_inner:
logger.error(f"Error fetching details for credential '{name}' ({service}): {e_inner}", exc_info=True)
credentials_list.append({"name": name, "error": f"Could not retrieve details: {str(e_inner)}"})
logger.error(
f"Error fetching details for credential '{name}' ({service}): {e_inner}",
exc_info=True,
)
credentials_list.append(
{
"name": name,
"error": f"Could not retrieve details: {str(e_inner)}",
}
)

return jsonify(credentials_list)
except Exception as e:
logger.error(f"Error in /all/{service}: {e}", exc_info=True)
return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500

@credentials_bp.route('/markets', methods=['GET'])

@credentials_bp.route("/markets", methods=["GET"])
def handle_markets():
"""
Returns a list of unique market regions for Deezer and Spotify accounts.
@@ -157,29 +206,35 @@ def handle_markets():
spotify_regions = set()

# Process Deezer accounts
deezer_account_names = list_credentials('deezer')
deezer_account_names = list_credentials("deezer")
for name in deezer_account_names:
try:
account_data = get_credential('deezer', name)
if account_data and 'region' in account_data and account_data['region']:
deezer_regions.add(account_data['region'])
account_data = get_credential("deezer", name)
if account_data and "region" in account_data and account_data["region"]:
deezer_regions.add(account_data["region"])
except Exception as e:
logger.warning(f"Could not retrieve region for deezer account {name}: {e}")
logger.warning(
f"Could not retrieve region for deezer account {name}: {e}"
)

# Process Spotify accounts
spotify_account_names = list_credentials('spotify')
spotify_account_names = list_credentials("spotify")
for name in spotify_account_names:
try:
account_data = get_credential('spotify', name)
if account_data and 'region' in account_data and account_data['region']:
spotify_regions.add(account_data['region'])
account_data = get_credential("spotify", name)
if account_data and "region" in account_data and account_data["region"]:
spotify_regions.add(account_data["region"])
except Exception as e:
logger.warning(f"Could not retrieve region for spotify account {name}: {e}")
logger.warning(
f"Could not retrieve region for spotify account {name}: {e}"
)

return jsonify({
return jsonify(
{
"deezer": sorted(list(deezer_regions)),
"spotify": sorted(list(spotify_regions))
}), 200
"spotify": sorted(list(spotify_regions)),
}
), 200

except Exception as e:
logger.error(f"Error in /markets: {e}", exc_info=True)

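A usage sketch for the credentials blueprint above. The routes, methods, and payload fields come from this diff; the /api/credentials mount prefix and the host/port are assumptions.

import requests

BASE = "http://localhost:7171/api/credentials"  # assumed mount prefix and host/port

# Set the global Spotify API keys (PUT requires string client_id/client_secret).
requests.put(
    f"{BASE}/spotify_api_config",
    json={"client_id": "your-id", "client_secret": "your-secret"},
)

# Create a Deezer account; create_credential validates fields such as 'arl' and 'region'.
requests.post(f"{BASE}/deezer/main", json={"arl": "...", "region": "FR"})

# Unique market regions across all stored accounts.
print(requests.get(f"{BASE}/markets").json())  # {"deezer": [...], "spotify": [...]}
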
@@ -3,40 +3,45 @@ from routes.utils.history_manager import get_history_entries
import logging

logger = logging.getLogger(__name__)
history_bp = Blueprint('history', __name__, url_prefix='/api/history')
history_bp = Blueprint("history", __name__, url_prefix="/api/history")

@history_bp.route('', methods=['GET'])

@history_bp.route("", methods=["GET"])
def get_download_history():
"""API endpoint to retrieve download history with pagination, sorting, and filtering."""
try:
limit = request.args.get('limit', 25, type=int)
offset = request.args.get('offset', 0, type=int)
sort_by = request.args.get('sort_by', 'timestamp_completed')
sort_order = request.args.get('sort_order', 'DESC')
limit = request.args.get("limit", 25, type=int)
offset = request.args.get("offset", 0, type=int)
sort_by = request.args.get("sort_by", "timestamp_completed")
sort_order = request.args.get("sort_order", "DESC")

# Basic filtering example: filter by status_final or download_type
filters = {}
status_filter = request.args.get('status_final')
status_filter = request.args.get("status_final")
if status_filter:
filters['status_final'] = status_filter
filters["status_final"] = status_filter

type_filter = request.args.get('download_type')
type_filter = request.args.get("download_type")
if type_filter:
filters['download_type'] = type_filter
filters["download_type"] = type_filter

# Add more filters as needed, e.g., by item_name (would need LIKE for partial match)
# search_term = request.args.get('search')
# if search_term:
# filters['item_name'] = f'%{search_term}%' # This would require LIKE in get_history_entries

entries, total_count = get_history_entries(limit, offset, sort_by, sort_order, filters)
entries, total_count = get_history_entries(
limit, offset, sort_by, sort_order, filters
)

return jsonify({
'entries': entries,
'total_count': total_count,
'limit': limit,
'offset': offset
})
return jsonify(
{
"entries": entries,
"total_count": total_count,
"limit": limit,
"offset": offset,
}
)
except Exception as e:
logger.error(f"Error in /api/history endpoint: {e}", exc_info=True)
return jsonify({"error": "Failed to retrieve download history"}), 500
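Since history_bp registers url_prefix="/api/history", a paginated query looks like the sketch below; the host and port are assumptions, and the filter value is illustrative (the accepted values depend on what history_manager stores).

import requests

params = {
    "limit": 50,
    "offset": 0,
    "sort_by": "timestamp_completed",
    "sort_order": "DESC",
    "download_type": "track",  # optional filter, handled as shown above
}
page = requests.get("http://localhost:7171/api/history", params=params).json()
print(page["total_count"], len(page["entries"]))
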
@@ -1,12 +1,15 @@
from flask import Blueprint, Response, request, jsonify
import os
import json
import traceback
import logging # Added logging import
import uuid # For generating error task IDs
import time # For timestamps
from routes.utils.celery_queue_manager import download_queue_manager
from routes.utils.celery_tasks import store_task_info, store_task_status, ProgressState # For error task creation
from routes.utils.celery_tasks import (
store_task_info,
store_task_status,
ProgressState,
) # For error task creation
import threading # For playlist watch trigger

# Imports from playlist_watch.py
@@ -17,15 +20,19 @@ from routes.utils.watch.db import (
get_watched_playlists,
add_specific_tracks_to_playlist_table,
remove_specific_tracks_from_playlist_table,
is_track_in_playlist_db # Added import
is_track_in_playlist_db, # Added import
)
from routes.utils.get_info import get_spotify_info # Already used, but ensure it's here
from routes.utils.watch.manager import check_watched_playlists, get_watch_config # For manual trigger & config
from routes.utils.watch.manager import (
check_watched_playlists,
get_watch_config,
) # For manual trigger & config

logger = logging.getLogger(__name__) # Added logger initialization
playlist_bp = Blueprint('playlist', __name__, url_prefix='/api/playlist')
playlist_bp = Blueprint("playlist", __name__, url_prefix="/api/playlist")

@playlist_bp.route('/download/<playlist_id>', methods=['GET'])

@playlist_bp.route("/download/<playlist_id>", methods=["GET"])
def handle_download(playlist_id):
# Retrieve essential parameters from the request.
# name = request.args.get('name') # Removed
@@ -34,28 +41,42 @@ def handle_download(playlist_id):

# Construct the URL from playlist_id
url = f"https://open.spotify.com/playlist/{playlist_id}"
orig_params["original_url"] = request.url # Update original_url to the constructed one
orig_params["original_url"] = (
request.url
) # Update original_url to the constructed one

# Fetch metadata from Spotify
try:
playlist_info = get_spotify_info(playlist_id, "playlist")
if not playlist_info or not playlist_info.get('name') or not playlist_info.get('owner'):
if (
not playlist_info
or not playlist_info.get("name")
or not playlist_info.get("owner")
):
return Response(
json.dumps({"error": f"Could not retrieve metadata for playlist ID: {playlist_id}"}),
json.dumps(
{
"error": f"Could not retrieve metadata for playlist ID: {playlist_id}"
}
),
status=404,
mimetype='application/json'
mimetype="application/json",
)

name_from_spotify = playlist_info.get('name')
name_from_spotify = playlist_info.get("name")
# Use owner's display_name as the 'artist' for playlists
owner_info = playlist_info.get('owner', {})
artist_from_spotify = owner_info.get('display_name', "Unknown Owner")
owner_info = playlist_info.get("owner", {})
artist_from_spotify = owner_info.get("display_name", "Unknown Owner")

except Exception as e:
return Response(
json.dumps({"error": f"Failed to fetch metadata for playlist {playlist_id}: {str(e)}"}),
json.dumps(
{
"error": f"Failed to fetch metadata for playlist {playlist_id}: {str(e)}"
}
),
status=500,
mimetype='application/json'
mimetype="application/json",
)

# Validate required parameters
@@ -63,83 +84,94 @@ def handle_download(playlist_id):
return Response(
json.dumps({"error": "Missing required parameter: url"}),
status=400,
mimetype='application/json'
mimetype="application/json",
)

try:
task_id = download_queue_manager.add_task({
task_id = download_queue_manager.add_task(
{
"download_type": "playlist",
"url": url,
"name": name_from_spotify, # Use fetched name
"artist": artist_from_spotify, # Use fetched owner name as artist
"orig_request": orig_params
})
"orig_request": orig_params,
}
)
# Removed DuplicateDownloadError handling, add_task now manages this by creating an error task.
except Exception as e:
# Generic error handling for other issues during task submission
error_task_id = str(uuid.uuid4())
store_task_info(error_task_id, {
store_task_info(
error_task_id,
{
"download_type": "playlist",
"url": url,
"name": name_from_spotify, # Use fetched name
"artist": artist_from_spotify, # Use fetched owner name as artist
"original_request": orig_params,
"created_at": time.time(),
"is_submission_error_task": True
})
store_task_status(error_task_id, {
"is_submission_error_task": True,
},
)
store_task_status(
error_task_id,
{
"status": ProgressState.ERROR,
"error": f"Failed to queue playlist download: {str(e)}",
"timestamp": time.time()
})
"timestamp": time.time(),
},
)
return Response(
json.dumps({"error": f"Failed to queue playlist download: {str(e)}", "task_id": error_task_id}),
json.dumps(
{
"error": f"Failed to queue playlist download: {str(e)}",
"task_id": error_task_id,
}
),
status=500,
mimetype='application/json'
mimetype="application/json",
)

return Response(
json.dumps({"prg_file": task_id}), # prg_file is the old name for task_id
status=202,
mimetype='application/json'
mimetype="application/json",
)

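Combined with the /api/prgs routes later in this diff, the handler above yields a submit-then-poll client flow. In this sketch the host, port, and the terminal status strings are assumptions; the response shape ({"prg_file": task_id}, HTTP 202) is taken from the code.

import time
import requests

BASE = "http://localhost:7171"  # assumed host/port
playlist_id = "37i9dQZF1DXcBWIGoYBM5M"  # any Spotify playlist id

resp = requests.get(f"{BASE}/api/playlist/download/{playlist_id}")
task_id = resp.json()["prg_file"]  # "prg_file" is the legacy name for the task id

while True:
    info = requests.get(f"{BASE}/api/prgs/{task_id}").json()
    status = (info.get("last_line") or {}).get("status")
    print(status)
    if status in ("complete", "error", "cancelled"):  # illustrative status values
        break
    time.sleep(2)
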
@playlist_bp.route('/download/cancel', methods=['GET'])

@playlist_bp.route("/download/cancel", methods=["GET"])
def cancel_download():
"""
Cancel a running playlist download process by its prg file name.
"""
prg_file = request.args.get('prg_file')
prg_file = request.args.get("prg_file")
if not prg_file:
return Response(
json.dumps({"error": "Missing process id (prg_file) parameter"}),
status=400,
mimetype='application/json'
mimetype="application/json",
)

# Use the queue manager's cancellation method.
result = download_queue_manager.cancel_task(prg_file)
status_code = 200 if result.get("status") == "cancelled" else 404

return Response(
json.dumps(result),
status=status_code,
mimetype='application/json'
)
return Response(json.dumps(result), status=status_code, mimetype="application/json")

@playlist_bp.route('/info', methods=['GET'])

@playlist_bp.route("/info", methods=["GET"])
def get_playlist_info():
"""
Retrieve Spotify playlist metadata given a Spotify playlist ID.
Expects a query parameter 'id' that contains the Spotify playlist ID.
"""
spotify_id = request.args.get('id')
spotify_id = request.args.get("id")

if not spotify_id:
return Response(
json.dumps({"error": "Missing parameter: id"}),
status=400,
mimetype='application/json'
mimetype="application/json",
)

try:
@@ -148,36 +180,32 @@ def get_playlist_info():

# If playlist_info is successfully fetched, check if it's watched
# and augment track items with is_locally_known status
if playlist_info and playlist_info.get('id'):
watched_playlist_details = get_watched_playlist(playlist_info['id'])
if playlist_info and playlist_info.get("id"):
watched_playlist_details = get_watched_playlist(playlist_info["id"])
if watched_playlist_details: # Playlist is being watched
if playlist_info.get('tracks') and playlist_info['tracks'].get('items'):
for item in playlist_info['tracks']['items']:
if item and item.get('track') and item['track'].get('id'):
track_id = item['track']['id']
item['track']['is_locally_known'] = is_track_in_playlist_db(playlist_info['id'], track_id)
elif item and item.get('track'): # Track object exists but no ID
item['track']['is_locally_known'] = False
if playlist_info.get("tracks") and playlist_info["tracks"].get("items"):
for item in playlist_info["tracks"]["items"]:
if item and item.get("track") and item["track"].get("id"):
track_id = item["track"]["id"]
item["track"]["is_locally_known"] = is_track_in_playlist_db(
playlist_info["id"], track_id
)
elif item and item.get(
"track"
): # Track object exists but no ID
item["track"]["is_locally_known"] = False
# If not watched, or no tracks, is_locally_known will not be added, or tracks won't exist to add it to.
# Frontend should handle absence of this key as false.

return Response(
json.dumps(playlist_info),
status=200,
mimetype='application/json'
json.dumps(playlist_info), status=200, mimetype="application/json"
)
except Exception as e:
error_data = {
"error": str(e),
"traceback": traceback.format_exc()
}
return Response(
json.dumps(error_data),
status=500,
mimetype='application/json'
)
error_data = {"error": str(e), "traceback": traceback.format_exc()}
return Response(json.dumps(error_data), status=500, mimetype="application/json")

@playlist_bp.route('/watch/<string:playlist_spotify_id>', methods=['PUT'])

@playlist_bp.route("/watch/<string:playlist_spotify_id>", methods=["PUT"])
def add_to_watchlist(playlist_spotify_id):
"""Adds a playlist to the watchlist."""
watch_config = get_watch_config()
@@ -188,13 +216,21 @@ def add_to_watchlist(playlist_spotify_id):
try:
# Check if already watched
if get_watched_playlist(playlist_spotify_id):
return jsonify({"message": f"Playlist {playlist_spotify_id} is already being watched."}), 200
return jsonify(
{"message": f"Playlist {playlist_spotify_id} is already being watched."}
), 200

# Fetch playlist details from Spotify to populate our DB
playlist_data = get_spotify_info(playlist_spotify_id, "playlist")
if not playlist_data or 'id' not in playlist_data:
logger.error(f"Could not fetch details for playlist {playlist_spotify_id} from Spotify.")
return jsonify({"error": f"Could not fetch details for playlist {playlist_spotify_id} from Spotify."}), 404
if not playlist_data or "id" not in playlist_data:
logger.error(
f"Could not fetch details for playlist {playlist_spotify_id} from Spotify."
)
return jsonify(
{
"error": f"Could not fetch details for playlist {playlist_spotify_id} from Spotify."
}
), 404

add_playlist_db(playlist_data) # This also creates the tracks table

@@ -206,13 +242,23 @@ def add_to_watchlist(playlist_spotify_id):
# from routes.utils.watch.db import add_tracks_to_playlist_db # Keep local import for clarity
# add_tracks_to_playlist_db(playlist_spotify_id, initial_track_items)

logger.info(f"Playlist {playlist_spotify_id} added to watchlist. Its tracks will be processed by the watch manager.")
return jsonify({"message": f"Playlist {playlist_spotify_id} added to watchlist. Tracks will be processed shortly."}), 201
logger.info(
f"Playlist {playlist_spotify_id} added to watchlist. Its tracks will be processed by the watch manager."
)
return jsonify(
{
"message": f"Playlist {playlist_spotify_id} added to watchlist. Tracks will be processed shortly."
}
), 201
except Exception as e:
logger.error(f"Error adding playlist {playlist_spotify_id} to watchlist: {e}", exc_info=True)
logger.error(
f"Error adding playlist {playlist_spotify_id} to watchlist: {e}",
exc_info=True,
)
return jsonify({"error": f"Could not add playlist to watchlist: {str(e)}"}), 500

@playlist_bp.route('/watch/<string:playlist_spotify_id>/status', methods=['GET'])

@playlist_bp.route("/watch/<string:playlist_spotify_id>/status", methods=["GET"])
def get_playlist_watch_status(playlist_spotify_id):
"""Checks if a specific playlist is being watched."""
logger.info(f"Checking watch status for playlist {playlist_spotify_id}.")
@@ -225,10 +271,14 @@ def get_playlist_watch_status(playlist_spotify_id):
# between "not watched" and an actual error fetching status.
return jsonify({"is_watched": False}), 200
except Exception as e:
logger.error(f"Error checking watch status for playlist {playlist_spotify_id}: {e}", exc_info=True)
logger.error(
f"Error checking watch status for playlist {playlist_spotify_id}: {e}",
exc_info=True,
)
return jsonify({"error": f"Could not check watch status: {str(e)}"}), 500

@playlist_bp.route('/watch/<string:playlist_spotify_id>', methods=['DELETE'])

@playlist_bp.route("/watch/<string:playlist_spotify_id>", methods=["DELETE"])
def remove_from_watchlist(playlist_spotify_id):
"""Removes a playlist from the watchlist."""
watch_config = get_watch_config()
@@ -238,76 +288,149 @@ def remove_from_watchlist(playlist_spotify_id):
logger.info(f"Attempting to remove playlist {playlist_spotify_id} from watchlist.")
try:
if not get_watched_playlist(playlist_spotify_id):
return jsonify({"error": f"Playlist {playlist_spotify_id} not found in watchlist."}), 404
return jsonify(
{"error": f"Playlist {playlist_spotify_id} not found in watchlist."}
), 404

remove_playlist_db(playlist_spotify_id)
logger.info(f"Playlist {playlist_spotify_id} removed from watchlist successfully.")
return jsonify({"message": f"Playlist {playlist_spotify_id} removed from watchlist."}), 200
logger.info(
f"Playlist {playlist_spotify_id} removed from watchlist successfully."
)
return jsonify(
{"message": f"Playlist {playlist_spotify_id} removed from watchlist."}
), 200
except Exception as e:
logger.error(f"Error removing playlist {playlist_spotify_id} from watchlist: {e}", exc_info=True)
return jsonify({"error": f"Could not remove playlist from watchlist: {str(e)}"}), 500
logger.error(
f"Error removing playlist {playlist_spotify_id} from watchlist: {e}",
exc_info=True,
)
return jsonify(
{"error": f"Could not remove playlist from watchlist: {str(e)}"}
), 500

@playlist_bp.route('/watch/<string:playlist_spotify_id>/tracks', methods=['POST'])

@playlist_bp.route("/watch/<string:playlist_spotify_id>/tracks", methods=["POST"])
def mark_tracks_as_known(playlist_spotify_id):
"""Fetches details for given track IDs and adds/updates them in the playlist's local DB table."""
watch_config = get_watch_config()
if not watch_config.get("enabled", False):
return jsonify({"error": "Watch feature is currently disabled globally. Cannot mark tracks."}), 403
return jsonify(
{
"error": "Watch feature is currently disabled globally. Cannot mark tracks."
}
), 403

logger.info(f"Attempting to mark tracks as known for playlist {playlist_spotify_id}.")
logger.info(
f"Attempting to mark tracks as known for playlist {playlist_spotify_id}."
)
try:
track_ids = request.json
if not isinstance(track_ids, list) or not all(isinstance(tid, str) for tid in track_ids):
return jsonify({"error": "Invalid request body. Expecting a JSON array of track Spotify IDs."}), 400
if not isinstance(track_ids, list) or not all(
isinstance(tid, str) for tid in track_ids
):
return jsonify(
{
"error": "Invalid request body. Expecting a JSON array of track Spotify IDs."
}
), 400

if not get_watched_playlist(playlist_spotify_id):
return jsonify({"error": f"Playlist {playlist_spotify_id} is not being watched."}), 404
return jsonify(
{"error": f"Playlist {playlist_spotify_id} is not being watched."}
), 404

fetched_tracks_details = []
for track_id in track_ids:
try:
track_detail = get_spotify_info(track_id, "track")
if track_detail and track_detail.get('id'):
if track_detail and track_detail.get("id"):
fetched_tracks_details.append(track_detail)
else:
logger.warning(f"Could not fetch details for track {track_id} when marking as known for playlist {playlist_spotify_id}.")
logger.warning(
f"Could not fetch details for track {track_id} when marking as known for playlist {playlist_spotify_id}."
)
except Exception as e:
logger.error(f"Failed to fetch Spotify details for track {track_id}: {e}")
logger.error(
f"Failed to fetch Spotify details for track {track_id}: {e}"
)

if not fetched_tracks_details:
return jsonify({"message": "No valid track details could be fetched to mark as known.", "processed_count": 0}), 200
return jsonify(
{
"message": "No valid track details could be fetched to mark as known.",
"processed_count": 0,
}
), 200

add_specific_tracks_to_playlist_table(playlist_spotify_id, fetched_tracks_details)
logger.info(f"Successfully marked/updated {len(fetched_tracks_details)} tracks as known for playlist {playlist_spotify_id}.")
return jsonify({"message": f"Successfully processed {len(fetched_tracks_details)} tracks for playlist {playlist_spotify_id}."}), 200
add_specific_tracks_to_playlist_table(
playlist_spotify_id, fetched_tracks_details
)
logger.info(
f"Successfully marked/updated {len(fetched_tracks_details)} tracks as known for playlist {playlist_spotify_id}."
)
return jsonify(
{
"message": f"Successfully processed {len(fetched_tracks_details)} tracks for playlist {playlist_spotify_id}."
}
), 200
except Exception as e:
logger.error(f"Error marking tracks as known for playlist {playlist_spotify_id}: {e}", exc_info=True)
logger.error(
f"Error marking tracks as known for playlist {playlist_spotify_id}: {e}",
exc_info=True,
)
return jsonify({"error": f"Could not mark tracks as known: {str(e)}"}), 500

@playlist_bp.route('/watch/<string:playlist_spotify_id>/tracks', methods=['DELETE'])

@playlist_bp.route("/watch/<string:playlist_spotify_id>/tracks", methods=["DELETE"])
def mark_tracks_as_missing_locally(playlist_spotify_id):
"""Removes specified tracks from the playlist's local DB table."""
watch_config = get_watch_config()
if not watch_config.get("enabled", False):
return jsonify({"error": "Watch feature is currently disabled globally. Cannot mark tracks."}), 403
return jsonify(
{
"error": "Watch feature is currently disabled globally. Cannot mark tracks."
}
), 403

logger.info(f"Attempting to mark tracks as missing (remove locally) for playlist {playlist_spotify_id}.")
logger.info(
f"Attempting to mark tracks as missing (remove locally) for playlist {playlist_spotify_id}."
)
try:
track_ids = request.json
if not isinstance(track_ids, list) or not all(isinstance(tid, str) for tid in track_ids):
return jsonify({"error": "Invalid request body. Expecting a JSON array of track Spotify IDs."}), 400
if not isinstance(track_ids, list) or not all(
isinstance(tid, str) for tid in track_ids
):
return jsonify(
{
"error": "Invalid request body. Expecting a JSON array of track Spotify IDs."
}
), 400

if not get_watched_playlist(playlist_spotify_id):
return jsonify({"error": f"Playlist {playlist_spotify_id} is not being watched."}), 404
return jsonify(
{"error": f"Playlist {playlist_spotify_id} is not being watched."}
), 404

deleted_count = remove_specific_tracks_from_playlist_table(playlist_spotify_id, track_ids)
logger.info(f"Successfully removed {deleted_count} tracks locally for playlist {playlist_spotify_id}.")
return jsonify({"message": f"Successfully removed {deleted_count} tracks locally for playlist {playlist_spotify_id}."}), 200
deleted_count = remove_specific_tracks_from_playlist_table(
playlist_spotify_id, track_ids
)
logger.info(
f"Successfully removed {deleted_count} tracks locally for playlist {playlist_spotify_id}."
)
return jsonify(
{
"message": f"Successfully removed {deleted_count} tracks locally for playlist {playlist_spotify_id}."
}
), 200
except Exception as e:
logger.error(f"Error marking tracks as missing (deleting locally) for playlist {playlist_spotify_id}: {e}", exc_info=True)
logger.error(
f"Error marking tracks as missing (deleting locally) for playlist {playlist_spotify_id}: {e}",
exc_info=True,
)
return jsonify({"error": f"Could not mark tracks as missing: {str(e)}"}), 500

@playlist_bp.route('/watch/list', methods=['GET'])

@playlist_bp.route("/watch/list", methods=["GET"])
def list_watched_playlists_endpoint():
"""Lists all playlists currently in the watchlist."""
try:
@@ -317,43 +440,86 @@ def list_watched_playlists_endpoint():
logger.error(f"Error listing watched playlists: {e}", exc_info=True)
return jsonify({"error": f"Could not list watched playlists: {str(e)}"}), 500

@playlist_bp.route('/watch/trigger_check', methods=['POST'])

@playlist_bp.route("/watch/trigger_check", methods=["POST"])
def trigger_playlist_check_endpoint():
"""Manually triggers the playlist checking mechanism for all watched playlists."""
watch_config = get_watch_config()
if not watch_config.get("enabled", False):
return jsonify({"error": "Watch feature is currently disabled globally. Cannot trigger check."}), 403
return jsonify(
{
"error": "Watch feature is currently disabled globally. Cannot trigger check."
}
), 403

logger.info("Manual trigger for playlist check received for all playlists.")
try:
# Run check_watched_playlists without an ID to check all
thread = threading.Thread(target=check_watched_playlists, args=(None,))
thread.start()
return jsonify({"message": "Playlist check triggered successfully in the background for all playlists."}), 202
return jsonify(
{
"message": "Playlist check triggered successfully in the background for all playlists."
}
), 202
except Exception as e:
logger.error(f"Error manually triggering playlist check for all: {e}", exc_info=True)
return jsonify({"error": f"Could not trigger playlist check for all: {str(e)}"}), 500
logger.error(
f"Error manually triggering playlist check for all: {e}", exc_info=True
)
return jsonify(
{"error": f"Could not trigger playlist check for all: {str(e)}"}
), 500

@playlist_bp.route('/watch/trigger_check/<string:playlist_spotify_id>', methods=['POST'])

@playlist_bp.route(
"/watch/trigger_check/<string:playlist_spotify_id>", methods=["POST"]
)
def trigger_specific_playlist_check_endpoint(playlist_spotify_id: str):
"""Manually triggers the playlist checking mechanism for a specific playlist."""
watch_config = get_watch_config()
if not watch_config.get("enabled", False):
return jsonify({"error": "Watch feature is currently disabled globally. Cannot trigger check."}), 403
return jsonify(
{
"error": "Watch feature is currently disabled globally. Cannot trigger check."
}
), 403

logger.info(f"Manual trigger for specific playlist check received for ID: {playlist_spotify_id}")
logger.info(
f"Manual trigger for specific playlist check received for ID: {playlist_spotify_id}"
)
try:
# Check if the playlist is actually in the watchlist first
watched_playlist = get_watched_playlist(playlist_spotify_id)
if not watched_playlist:
logger.warning(f"Trigger specific check: Playlist ID {playlist_spotify_id} not found in watchlist.")
return jsonify({"error": f"Playlist {playlist_spotify_id} is not in the watchlist. Add it first."}), 404
logger.warning(
f"Trigger specific check: Playlist ID {playlist_spotify_id} not found in watchlist."
)
return jsonify(
{
"error": f"Playlist {playlist_spotify_id} is not in the watchlist. Add it first."
}
), 404

# Run check_watched_playlists with the specific ID
thread = threading.Thread(target=check_watched_playlists, args=(playlist_spotify_id,))
thread = threading.Thread(
target=check_watched_playlists, args=(playlist_spotify_id,)
)
thread.start()
logger.info(f"Playlist check triggered in background for specific playlist ID: {playlist_spotify_id}")
return jsonify({"message": f"Playlist check triggered successfully in the background for {playlist_spotify_id}."}), 202
logger.info(
f"Playlist check triggered in background for specific playlist ID: {playlist_spotify_id}"
)
return jsonify(
{
"message": f"Playlist check triggered successfully in the background for {playlist_spotify_id}."
}
), 202
except Exception as e:
logger.error(f"Error manually triggering specific playlist check for {playlist_spotify_id}: {e}", exc_info=True)
return jsonify({"error": f"Could not trigger playlist check for {playlist_spotify_id}: {str(e)}"}), 500
logger.error(
f"Error manually triggering specific playlist check for {playlist_spotify_id}: {e}",
exc_info=True,
)
return jsonify(
{
"error": f"Could not trigger playlist check for {playlist_spotify_id}: {str(e)}"
}
), 500

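A sketch of driving the watch endpoints above. The /api/playlist prefix comes from the blueprint definition; the host and port are assumptions. Most of these routes return 403 while the watch feature is disabled in the watch config.

import requests

BASE = "http://localhost:7171/api/playlist"  # assumed host/port
pid = "37i9dQZF1DXcBWIGoYBM5M"  # any Spotify playlist id

requests.put(f"{BASE}/watch/{pid}")                        # 201, or 200 if already watched
print(requests.get(f"{BASE}/watch/{pid}/status").json())   # {"is_watched": ...}
requests.post(f"{BASE}/watch/trigger_check/{pid}")         # 202; check runs in a thread
requests.delete(f"{BASE}/watch/{pid}")                     # stop watching
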
113
routes/prgs.py
@@ -1,6 +1,4 @@
from flask import Blueprint, abort, jsonify, Response, stream_with_context, request
import os
import json
from flask import Blueprint, abort, jsonify, request
import logging
import time

@@ -11,18 +9,18 @@ from routes.utils.celery_tasks import (
get_all_tasks,
cancel_task,
retry_task,
ProgressState,
redis_client
redis_client,
)

# Configure logging
logger = logging.getLogger(__name__)

prgs_bp = Blueprint('prgs', __name__, url_prefix='/api/prgs')
prgs_bp = Blueprint("prgs", __name__, url_prefix="/api/prgs")

# (Old .prg file system removed. Using new task system only.)

@prgs_bp.route('/<task_id>', methods=['GET'])

@prgs_bp.route("/<task_id>", methods=["GET"])
def get_prg_file(task_id):
"""
Return a JSON object with the resource type, its name (title),
@@ -49,20 +47,31 @@ def get_prg_file(task_id):
if download_type and item_url:
try:
# Extract the ID from the item_url (last part of the path)
item_id = item_url.split('/')[-1]
item_id = item_url.split("/")[-1]
if item_id: # Ensure item_id is not empty
base_url = request.host_url.rstrip('/')
dynamic_original_url = f"{base_url}/api/{download_type}/download/{item_id}"
base_url = request.host_url.rstrip("/")
dynamic_original_url = (
f"{base_url}/api/{download_type}/download/{item_id}"
)
else:
logger.warning(f"Could not extract item ID from URL: {item_url} for task {task_id}. Falling back for original_url.")
logger.warning(
f"Could not extract item ID from URL: {item_url} for task {task_id}. Falling back for original_url."
)
original_request_obj = task_info.get("original_request", {})
dynamic_original_url = original_request_obj.get("original_url", "")
except Exception as e:
logger.error(f"Error constructing dynamic original_url for task {task_id}: {e}", exc_info=True)
logger.error(
f"Error constructing dynamic original_url for task {task_id}: {e}",
exc_info=True,
)
original_request_obj = task_info.get("original_request", {})
dynamic_original_url = original_request_obj.get("original_url", "") # Fallback on any error
dynamic_original_url = original_request_obj.get(
"original_url", ""
) # Fallback on any error
else:
logger.warning(f"Missing download_type ('{download_type}') or item_url ('{item_url}') in task_info for task {task_id}. Falling back for original_url.")
logger.warning(
f"Missing download_type ('{download_type}') or item_url ('{item_url}') in task_info for task {task_id}. Falling back for original_url."
)
original_request_obj = task_info.get("original_request", {})
dynamic_original_url = original_request_obj.get("original_url", "")

@@ -73,12 +82,12 @@ def get_prg_file(task_id):
"last_line": last_status,
"timestamp": time.time(),
"task_id": task_id,
"status_count": status_count
"status_count": status_count,
}
return jsonify(response)


@prgs_bp.route('/delete/<task_id>', methods=['DELETE'])
@prgs_bp.route("/delete/<task_id>", methods=["DELETE"])
def delete_prg_file(task_id):
"""
Delete a task's information and history.
@@ -92,13 +101,12 @@ def delete_prg_file(task_id):
if not task_info:
abort(404, "Task not found")
cancel_task(task_id)
from routes.utils.celery_tasks import redis_client
redis_client.delete(f"task:{task_id}:info")
redis_client.delete(f"task:{task_id}:status")
return {'message': f'Task {task_id} deleted successfully'}, 200
return {"message": f"Task {task_id} deleted successfully"}, 200


@prgs_bp.route('/list', methods=['GET'])
@prgs_bp.route("/list", methods=["GET"])
def list_prg_files():
"""
Retrieve a list of all tasks in the system.
@@ -116,20 +124,38 @@ def list_prg_files():
last_status = get_last_task_status(task_id)

if task_info and last_status:
detailed_tasks.append({
detailed_tasks.append(
{
"task_id": task_id,
"type": task_info.get("type", task_summary.get("type", "unknown")),
"name": task_info.get("name", task_summary.get("name", "Unknown")),
"artist": task_info.get("artist", task_summary.get("artist", "")),
"download_type": task_info.get("download_type", task_summary.get("download_type", "unknown")),
"status": last_status.get("status", "unknown"), # Keep summary status for quick access
"type": task_info.get(
"type", task_summary.get("type", "unknown")
),
"name": task_info.get(
"name", task_summary.get("name", "Unknown")
),
"artist": task_info.get(
"artist", task_summary.get("artist", "")
),
"download_type": task_info.get(
"download_type",
task_summary.get("download_type", "unknown"),
),
"status": last_status.get(
"status", "unknown"
), # Keep summary status for quick access
"last_status_obj": last_status, # Full last status object
"original_request": task_info.get("original_request", {}),
"created_at": task_info.get("created_at", 0),
"timestamp": last_status.get("timestamp", task_info.get("created_at", 0))
})
elif task_info: # If last_status is somehow missing, still provide some info
detailed_tasks.append({
"timestamp": last_status.get(
"timestamp", task_info.get("created_at", 0)
),
}
)
elif (
task_info
): # If last_status is somehow missing, still provide some info
detailed_tasks.append(
{
"task_id": task_id,
"type": task_info.get("type", "unknown"),
"name": task_info.get("name", "Unknown"),
@@ -139,11 +165,14 @@ def list_prg_files():
"last_status_obj": None,
"original_request": task_info.get("original_request", {}),
"created_at": task_info.get("created_at", 0),
"timestamp": task_info.get("created_at", 0)
})
"timestamp": task_info.get("created_at", 0),
}
)

# Sort tasks by creation time (newest first, or by timestamp if creation time is missing)
detailed_tasks.sort(key=lambda x: x.get('timestamp', x.get('created_at', 0)), reverse=True)
detailed_tasks.sort(
key=lambda x: x.get("timestamp", x.get("created_at", 0)), reverse=True
)

return jsonify(detailed_tasks)
except Exception as e:
@@ -151,7 +180,7 @@ def list_prg_files():
return jsonify({"error": "Failed to retrieve task list"}), 500


@prgs_bp.route('/retry/<task_id>', methods=['POST'])
@prgs_bp.route("/retry/<task_id>", methods=["POST"])
def retry_task_endpoint(task_id):
"""
Retry a failed task.
@@ -170,15 +199,17 @@ def retry_task_endpoint(task_id):

# If not found in new system, we need to handle the old system retry
# For now, return an error as we're transitioning to the new system
return jsonify({
return jsonify(
{
"status": "error",
"message": "Retry for old system is not supported in the new API. Please use the new task ID format."
}), 400
"message": "Retry for old system is not supported in the new API. Please use the new task ID format.",
}
), 400
except Exception as e:
abort(500, f"An error occurred: {e}")


@prgs_bp.route('/cancel/<task_id>', methods=['POST'])
@prgs_bp.route("/cancel/<task_id>", methods=["POST"])
def cancel_task_endpoint(task_id):
"""
Cancel a running or queued task.
@@ -197,9 +228,11 @@ def cancel_task_endpoint(task_id):

# If not found in new system, we need to handle the old system cancellation
# For now, return an error as we're transitioning to the new system
return jsonify({
return jsonify(
{
"status": "error",
"message": "Cancellation for old system is not supported in the new API. Please use the new task ID format."
}), 400
"message": "Cancellation for old system is not supported in the new API. Please use the new task ID format.",
}
), 400
except Exception as e:
abort(500, f"An error occurred: {e}")

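A sketch of the task-inspection endpoints above (the /api/prgs prefix comes from the blueprint; host and port are assumptions). Note that /delete/<task_id> cancels the task and removes its Redis keys, as shown in the handler.

import requests

BASE = "http://localhost:7171/api/prgs"  # assumed host/port

tasks = requests.get(f"{BASE}/list").json()  # sorted newest first
for t in tasks[:5]:
    print(t["task_id"], t["download_type"], t["name"], t["status"])

if tasks:
    tid = tasks[0]["task_id"]
    requests.post(f"{BASE}/cancel/{tid}")    # cancel a running or queued task
    requests.delete(f"{BASE}/delete/{tid}")  # then purge its info and status keys
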
@@ -1,66 +1,67 @@
from flask import Blueprint, jsonify, request
import logging
from routes.utils.search import search # Corrected import
from routes.config import get_config # Import get_config function

search_bp = Blueprint('search', __name__)
search_bp = Blueprint("search", __name__)

@search_bp.route('/search', methods=['GET'])

@search_bp.route("/search", methods=["GET"])
def handle_search():
try:
# Get query parameters
query = request.args.get('q', '')
search_type = request.args.get('search_type', '')
limit = int(request.args.get('limit', 10))
main = request.args.get('main', '') # Get the main parameter for account selection
query = request.args.get("q", "")
search_type = request.args.get("search_type", "")
limit = int(request.args.get("limit", 10))
main = request.args.get(
"main", ""
) # Get the main parameter for account selection

# If main parameter is not provided in the request, get it from config
if not main:
config = get_config()
if config and 'spotify' in config:
main = config['spotify']
if config and "spotify" in config:
main = config["spotify"]
print(f"Using main from config: {main}")


# Validate parameters
if not query:
return jsonify({'error': 'Missing search query'}), 400
return jsonify({"error": "Missing search query"}), 400

valid_types = ['track', 'album', 'artist', 'playlist', 'episode']
valid_types = ["track", "album", "artist", "playlist", "episode"]
if search_type not in valid_types:
return jsonify({'error': 'Invalid search type'}), 400
return jsonify({"error": "Invalid search type"}), 400

# Perform the search with corrected parameter name
raw_results = search(
query=query,
search_type=search_type, # Fixed parameter name
limit=limit,
main=main # Pass the main parameter
main=main, # Pass the main parameter
)


# Extract items from the appropriate section of the response based on search_type
items = []
if raw_results and search_type + 's' in raw_results:
type_key = search_type + 's'
items = raw_results[type_key].get('items', [])
if raw_results and search_type + "s" in raw_results:
type_key = search_type + "s"
items = raw_results[type_key].get("items", [])
elif raw_results and search_type in raw_results:

items = raw_results[search_type].get('items', [])

items = raw_results[search_type].get("items", [])

# Return both the items array and the full data for debugging
return jsonify({
'items': items,
'data': raw_results, # Include full data for debugging
'error': None
})
return jsonify(
{
"items": items,
"data": raw_results, # Include full data for debugging
"error": None,
}
)

except ValueError as e:
print(f"ValueError in search: {str(e)}")
return jsonify({'error': str(e)}), 400
return jsonify({"error": str(e)}), 400
except Exception as e:
import traceback

print(f"Exception in search: {str(e)}")
print(traceback.format_exc())
return jsonify({'error': f'Internal server error: {str(e)}'}), 500
return jsonify({"error": f"Internal server error: {str(e)}"}), 500

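A sketch of calling the search endpoint above. The mount prefix for search_bp is not visible in this diff, so the /api/search path is an assumption; the q, search_type, and limit parameters and the items/data/error response keys come from the handler.

import requests

resp = requests.get(
    "http://localhost:7171/api/search",  # assumed prefix and host/port
    params={"q": "daft punk", "search_type": "track", "limit": 5},
)
payload = resp.json()
for item in payload["items"]:
    print(item.get("name"))
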
127
routes/track.py
@@ -1,17 +1,21 @@
from flask import Blueprint, Response, request
import os
import json
import traceback
import uuid # For generating error task IDs
import time # For timestamps
from routes.utils.celery_queue_manager import download_queue_manager
from routes.utils.celery_tasks import store_task_info, store_task_status, ProgressState # For error task creation
from routes.utils.celery_tasks import (
store_task_info,
store_task_status,
ProgressState,
) # For error task creation
from urllib.parse import urlparse # for URL validation
from routes.utils.get_info import get_spotify_info # Added import

track_bp = Blueprint('track', __name__)

track_bp = Blueprint("track", __name__)

@track_bp.route('/download/<track_id>', methods=['GET'])

@track_bp.route("/download/<track_id>", methods=["GET"])
def handle_download(track_id):
# Retrieve essential parameters from the request.
# name = request.args.get('name') # Removed
@@ -25,132 +29,151 @@ def handle_download(track_id):
# Fetch metadata from Spotify
try:
track_info = get_spotify_info(track_id, "track")
if not track_info or not track_info.get('name') or not track_info.get('artists'):
if (
not track_info
or not track_info.get("name")
or not track_info.get("artists")
):
return Response(
json.dumps({"error": f"Could not retrieve metadata for track ID: {track_id}"}),
json.dumps(
{"error": f"Could not retrieve metadata for track ID: {track_id}"}
),
status=404,
mimetype='application/json'
mimetype="application/json",
)

name_from_spotify = track_info.get('name')
artist_from_spotify = track_info['artists'][0].get('name') if track_info['artists'] else "Unknown Artist"
name_from_spotify = track_info.get("name")
artist_from_spotify = (
track_info["artists"][0].get("name")
if track_info["artists"]
else "Unknown Artist"
)

except Exception as e:
return Response(
json.dumps({"error": f"Failed to fetch metadata for track {track_id}: {str(e)}"}),
json.dumps(
{"error": f"Failed to fetch metadata for track {track_id}: {str(e)}"}
),
status=500,
mimetype='application/json'
mimetype="application/json",
)

# Validate required parameters
if not url:
return Response(
json.dumps({"error": "Missing required parameter: url", "original_url": url}),
json.dumps(
{"error": "Missing required parameter: url", "original_url": url}
),
status=400,
mimetype='application/json'
mimetype="application/json",
)
# Validate URL domain
parsed = urlparse(url)
host = parsed.netloc.lower()
if not (host.endswith('deezer.com') or host.endswith('open.spotify.com') or host.endswith('spotify.com')):
if not (
host.endswith("deezer.com")
or host.endswith("open.spotify.com")
or host.endswith("spotify.com")
):
return Response(
json.dumps({"error": f"Invalid Link {url} :(", "original_url": url}),
status=400,
mimetype='application/json'
mimetype="application/json",
)

try:
task_id = download_queue_manager.add_task({
task_id = download_queue_manager.add_task(
{
"download_type": "track",
"url": url,
"name": name_from_spotify, # Use fetched name
"artist": artist_from_spotify, # Use fetched artist
"orig_request": orig_params
})
"orig_request": orig_params,
}
)
# Removed DuplicateDownloadError handling, add_task now manages this by creating an error task.
except Exception as e:
# Generic error handling for other issues during task submission
error_task_id = str(uuid.uuid4())
store_task_info(error_task_id, {
store_task_info(
error_task_id,
{
"download_type": "track",
"url": url,
"name": name_from_spotify, # Use fetched name
"artist": artist_from_spotify, # Use fetched artist
"original_request": orig_params,
"created_at": time.time(),
"is_submission_error_task": True
})
store_task_status(error_task_id, {
"is_submission_error_task": True,
},
)
store_task_status(
error_task_id,
{
"status": ProgressState.ERROR,
"error": f"Failed to queue track download: {str(e)}",
"timestamp": time.time()
})
"timestamp": time.time(),
},
)
return Response(
json.dumps({"error": f"Failed to queue track download: {str(e)}", "task_id": error_task_id}),
json.dumps(
{
"error": f"Failed to queue track download: {str(e)}",
"task_id": error_task_id,
}
),
status=500,
mimetype='application/json'
mimetype="application/json",
)

return Response(
json.dumps({"prg_file": task_id}), # prg_file is the old name for task_id
status=202,
mimetype='application/json'
mimetype="application/json",
)

@track_bp.route('/download/cancel', methods=['GET'])

@track_bp.route("/download/cancel", methods=["GET"])
def cancel_download():
"""
Cancel a running track download process by its process id (prg file name).
"""
prg_file = request.args.get('prg_file')
prg_file = request.args.get("prg_file")
if not prg_file:
return Response(
json.dumps({"error": "Missing process id (prg_file) parameter"}),
status=400,
mimetype='application/json'
mimetype="application/json",
)

# Use the queue manager's cancellation method.
result = download_queue_manager.cancel_task(prg_file)
status_code = 200 if result.get("status") == "cancelled" else 404

return Response(
json.dumps(result),
status=status_code,
mimetype='application/json'
)
return Response(json.dumps(result), status=status_code, mimetype="application/json")

@track_bp.route('/info', methods=['GET'])

@track_bp.route("/info", methods=["GET"])
def get_track_info():
"""
Retrieve Spotify track metadata given a Spotify track ID.
Expects a query parameter 'id' that contains the Spotify track ID.
"""
spotify_id = request.args.get('id')
spotify_id = request.args.get("id")

if not spotify_id:
return Response(
json.dumps({"error": "Missing parameter: id"}),
status=400,
mimetype='application/json'
mimetype="application/json",
)

try:
# Import and use the get_spotify_info function from the utility module.
from routes.utils.get_info import get_spotify_info

track_info = get_spotify_info(spotify_id, "track")
return Response(
json.dumps(track_info),
status=200,
mimetype='application/json'
)
return Response(json.dumps(track_info), status=200, mimetype="application/json")
except Exception as e:
error_data = {
"error": str(e),
"traceback": traceback.format_exc()
}
return Response(
json.dumps(error_data),
status=500,
mimetype='application/json'
)
error_data = {"error": str(e), "traceback": traceback.format_exc()}
return Response(json.dumps(error_data), status=500, mimetype="application/json")
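A usage sketch for the endpoints above (illustrative; the mount prefix, host, and track id are placeholders, not part of this PR):

import requests

BASE = "http://localhost:7171/api/track"  # assumed mount point for track_bp
queued = requests.get(
    f"{BASE}/download/3n3Ppam7vgaVa1iaRUc9Lp",
    params={"url": "https://open.spotify.com/track/3n3Ppam7vgaVa1iaRUc9Lp"},
)
task_id = queued.json()["prg_file"]  # the 202 body carries the task id under its legacy name
requests.get(f"{BASE}/download/cancel", params={"prg_file": task_id})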
@@ -1,11 +1,12 @@
import os
import json
import traceback
from deezspot.spotloader import SpoLogin
from deezspot.deezloader import DeeLogin
from pathlib import Path
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds, get_spotify_blob_path
from routes.utils.celery_config import get_config_params
from routes.utils.credentials import (
get_credential,
_get_global_spotify_api_creds,
get_spotify_blob_path,
)

def download_album(
url,
@@ -23,51 +24,63 @@ def download_album(
max_retries=3,
progress_callback=None,
convert_to=None,
bitrate=None
bitrate=None,
):
try:
# Detect URL source (Spotify or Deezer) from URL
is_spotify_url = 'open.spotify.com' in url.lower()
is_deezer_url = 'deezer.com' in url.lower()
is_spotify_url = "open.spotify.com" in url.lower()
is_deezer_url = "deezer.com" in url.lower()

service = ''
service = ""
if is_spotify_url:
service = 'spotify'
service = "spotify"
elif is_deezer_url:
service = 'deezer'
service = "deezer"
else:
error_msg = "Invalid URL: Must be from open.spotify.com or deezer.com"
print(f"ERROR: {error_msg}")
raise ValueError(error_msg)

print(f"DEBUG: album.py - Service determined from URL: {service}")
print(f"DEBUG: album.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'")
print(
f"DEBUG: album.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'"
)

# Get global Spotify API credentials
global_spotify_client_id, global_spotify_client_secret = _get_global_spotify_api_creds()
global_spotify_client_id, global_spotify_client_secret = (
_get_global_spotify_api_creds()
)
if not global_spotify_client_id or not global_spotify_client_secret:
warning_msg = "WARN: album.py - Global Spotify client_id/secret not found in search.json. Spotify operations will likely fail."
print(warning_msg)

if service == 'spotify':
if service == "spotify":
if fallback: # Fallback is a Deezer account name for a Spotify URL
if quality is None: quality = 'FLAC' # Deezer quality for first attempt
if fall_quality is None: fall_quality = 'HIGH' # Spotify quality for fallback (if Deezer fails)
if quality is None:
quality = "FLAC" # Deezer quality for first attempt
if fall_quality is None:
fall_quality = (
"HIGH" # Spotify quality for fallback (if Deezer fails)
)

deezer_error = None
try:
# Attempt 1: Deezer via download_albumspo (using 'fallback' as Deezer account name)
print(f"DEBUG: album.py - Spotify URL. Attempt 1: Deezer (account: {fallback})")
deezer_fallback_creds = get_credential('deezer', fallback)
arl = deezer_fallback_creds.get('arl')
print(
f"DEBUG: album.py - Spotify URL. Attempt 1: Deezer (account: {fallback})"
)
deezer_fallback_creds = get_credential("deezer", fallback)
arl = deezer_fallback_creds.get("arl")
if not arl:
raise ValueError(f"ARL not found for Deezer account '{fallback}'.")
raise ValueError(
f"ARL not found for Deezer account '{fallback}'."
)

dl = DeeLogin(
arl=arl,
spotify_client_id=global_spotify_client_id,
spotify_client_secret=global_spotify_client_secret,
progress_callback=progress_callback
progress_callback=progress_callback,
)
dl.download_albumspo(
link_album=url, # Spotify URL
@@ -85,29 +98,44 @@ def download_album(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate
bitrate=bitrate,
)
print(
f"DEBUG: album.py - Album download via Deezer (account: {fallback}) successful for Spotify URL."
)
print(f"DEBUG: album.py - Album download via Deezer (account: {fallback}) successful for Spotify URL.")
except Exception as e:
deezer_error = e
print(f"ERROR: album.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}")
print(
f"ERROR: album.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}"
)
traceback.print_exc()
print(f"DEBUG: album.py - Attempting Spotify direct download (account: {main} for blob)...")
print(
f"DEBUG: album.py - Attempting Spotify direct download (account: {main} for blob)..."
)

# Attempt 2: Spotify direct via download_album (using 'main' as Spotify account for blob)
try:
if not global_spotify_client_id or not global_spotify_client_secret:
raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.")
if (
not global_spotify_client_id
or not global_spotify_client_secret
):
raise ValueError(
"Global Spotify API credentials (client_id/secret) not configured for Spotify download."
)

blob_file_path = get_spotify_blob_path(main)
if not blob_file_path or not blob_file_path.exists():
raise FileNotFoundError(f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}")
raise FileNotFoundError(
f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}"
)

spo = SpoLogin(
credentials_path=str(blob_file_path), # Ensure it's a string
credentials_path=str(
blob_file_path
), # Ensure it's a string
spotify_client_id=global_spotify_client_id,
spotify_client_secret=global_spotify_client_secret,
progress_callback=progress_callback
progress_callback=progress_callback,
)
spo.download_album(
link_album=url, # Spotify URL
@@ -126,31 +154,42 @@ def download_album(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate
bitrate=bitrate,
)
print(
f"DEBUG: album.py - Spotify direct download (account: {main} for blob) successful."
)
print(f"DEBUG: album.py - Spotify direct download (account: {main} for blob) successful.")
except Exception as e2:
print(f"ERROR: album.py - Spotify direct download (account: {main} for blob) also failed: {e2}")
print(
f"ERROR: album.py - Spotify direct download (account: {main} for blob) also failed: {e2}"
)
raise RuntimeError(
f"Both Deezer attempt (account: {fallback}) and Spotify direct (account: {main} for blob) failed. "
f"Deezer error: {deezer_error}, Spotify error: {e2}"
) from e2
else:
# Spotify URL, no fallback. Direct Spotify download using 'main' (Spotify account for blob)
if quality is None: quality = 'HIGH' # Default Spotify quality
print(f"DEBUG: album.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}")
if quality is None:
quality = "HIGH" # Default Spotify quality
print(
f"DEBUG: album.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}"
)
if not global_spotify_client_id or not global_spotify_client_secret:
raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.")
raise ValueError(
"Global Spotify API credentials (client_id/secret) not configured for Spotify download."
)

blob_file_path = get_spotify_blob_path(main)
if not blob_file_path or not blob_file_path.exists():
raise FileNotFoundError(f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}")
raise FileNotFoundError(
f"Spotify credentials blob file not found or path is invalid for account '{main}'. Path: {str(blob_file_path)}"
)

spo = SpoLogin(
credentials_path=str(blob_file_path), # Ensure it's a string
spotify_client_id=global_spotify_client_id,
spotify_client_secret=global_spotify_client_secret,
progress_callback=progress_callback
progress_callback=progress_callback,
)
spo.download_album(
link_album=url,
@@ -169,16 +208,21 @@ def download_album(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate
bitrate=bitrate,
)
print(
f"DEBUG: album.py - Direct Spotify download (account: {main} for blob) successful."
)
print(f"DEBUG: album.py - Direct Spotify download (account: {main} for blob) successful.")

elif service == 'deezer':
elif service == "deezer":
# Deezer URL. Direct Deezer download using 'main' (Deezer account name for ARL)
if quality is None: quality = 'FLAC' # Default Deezer quality
print(f"DEBUG: album.py - Deezer URL. Direct download with Deezer account: {main}")
deezer_main_creds = get_credential('deezer', main) # For ARL
arl = deezer_main_creds.get('arl')
if quality is None:
quality = "FLAC" # Default Deezer quality
print(
f"DEBUG: album.py - Deezer URL. Direct download with Deezer account: {main}"
)
deezer_main_creds = get_credential("deezer", main) # For ARL
arl = deezer_main_creds.get("arl")
if not arl:
raise ValueError(f"ARL not found for Deezer account '{main}'.")

@@ -186,7 +230,7 @@ def download_album(
arl=arl, # Account specific ARL
spotify_client_id=global_spotify_client_id, # Global Spotify keys
spotify_client_secret=global_spotify_client_secret, # Global Spotify keys
progress_callback=progress_callback
progress_callback=progress_callback,
)
dl.download_albumdee( # Deezer URL, download via Deezer
link_album=url,
@@ -203,9 +247,11 @@ def download_album(
retry_delay_increase=retry_delay_increase,
max_retries=max_retries,
convert_to=convert_to,
bitrate=bitrate
bitrate=bitrate,
)
print(
f"DEBUG: album.py - Direct Deezer download (account: {main}) successful."
)
print(f"DEBUG: album.py - Direct Deezer download (account: {main}) successful.")
else:
# Should be caught by initial service check, but as a safeguard
raise ValueError(f"Unsupported service determined: {service}")
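A call sketch for the fallback chain above (illustrative: the module path is inferred from the "album.py" debug strings, the elided parameters `main`, `fallback`, and `quality` are inferred from the function body, and the account names are placeholders):

from routes.utils.album import download_album  # path inferred, not confirmed by this diff

download_album(
    url="https://open.spotify.com/album/4aawyAB9vmqN3uQ7FjRGTy",
    main="my_spotify_account",    # Spotify blob account used for direct downloads
    fallback="my_deezer_account", # optional Deezer account tried first for Spotify URLs
    quality=None,                 # None picks the FLAC/HIGH defaults shown above
)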
@@ -1,10 +1,7 @@
import json
import traceback
from pathlib import Path
import os
import logging
from flask import Blueprint, Response, request, url_for
from routes.utils.celery_queue_manager import download_queue_manager, get_config_params
from flask import url_for
from routes.utils.celery_queue_manager import download_queue_manager
from routes.utils.get_info import get_spotify_info
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds
from routes.utils.celery_tasks import get_last_task_status, ProgressState
@@ -15,12 +12,18 @@ from deezspot.libutils.utils import get_ids, link_is_valid
# Configure logging
logger = logging.getLogger(__name__)

def log_json(message_dict):
"""Helper function to output a JSON-formatted log message."""
print(json.dumps(message_dict))

def get_artist_discography(url, main_spotify_account_name, album_type='album,single,compilation,appears_on', progress_callback=None):
def get_artist_discography(
url,
main_spotify_account_name,
album_type="album,single,compilation,appears_on",
progress_callback=None,
):
"""
Validate the URL, extract the artist ID, and retrieve the discography.
Uses global Spotify API client_id/secret for Spo initialization.
@@ -39,21 +42,34 @@ def get_artist_discography(url, main_spotify_account_name, album_type='album,sin
client_id, client_secret = _get_global_spotify_api_creds()

if not client_id or not client_secret:
log_json({"status": "error", "message": "Global Spotify API client_id or client_secret not configured."})
log_json(
{
"status": "error",
"message": "Global Spotify API client_id or client_secret not configured.",
}
)
raise ValueError("Global Spotify API credentials are not configured.")

if not main_spotify_account_name:
# This is a warning now, as API keys are global.
logger.warning("main_spotify_account_name not provided for get_artist_discography context. Using global API keys.")
logger.warning(
"main_spotify_account_name not provided for get_artist_discography context. Using global API keys."
)
else:
# Check if account exists for context, good for consistency
try:
get_credential('spotify', main_spotify_account_name)
logger.debug(f"Spotify account context '{main_spotify_account_name}' exists for get_artist_discography.")
get_credential("spotify", main_spotify_account_name)
logger.debug(
f"Spotify account context '{main_spotify_account_name}' exists for get_artist_discography."
)
except FileNotFoundError:
logger.warning(f"Spotify account '{main_spotify_account_name}' provided for discography context not found.")
logger.warning(
f"Spotify account '{main_spotify_account_name}' provided for discography context not found."
)
except Exception as e:
logger.warning(f"Error checking Spotify account '{main_spotify_account_name}' for discography context: {e}")
logger.warning(
f"Error checking Spotify account '{main_spotify_account_name}' for discography context: {e}"
)

Spo.__init__(client_id, client_secret) # Initialize with global API keys

@@ -78,7 +94,9 @@ def get_artist_discography(url, main_spotify_account_name, album_type='album,sin
raise

def download_artist_albums(url, album_type="album,single,compilation", request_args=None):
def download_artist_albums(
url, album_type="album,single,compilation", request_args=None
):
"""
Download albums by an artist, filtered by album types.

@@ -95,19 +113,20 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
raise ValueError("Missing required parameter: url")

# Extract artist ID from URL
artist_id = url.split('/')[-1]
if '?' in artist_id:
artist_id = artist_id.split('?')[0]
artist_id = url.split("/")[-1]
if "?" in artist_id:
artist_id = artist_id.split("?")[0]

logger.info(f"Fetching artist info for ID: {artist_id}")

# Detect URL source (only Spotify is supported for artists)
is_spotify_url = 'open.spotify.com' in url.lower()
is_deezer_url = 'deezer.com' in url.lower()
is_spotify_url = "open.spotify.com" in url.lower()

# Artist functionality only works with Spotify URLs currently
if not is_spotify_url:
error_msg = "Invalid URL: Artist functionality only supports open.spotify.com URLs"
error_msg = (
"Invalid URL: Artist functionality only supports open.spotify.com URLs"
)
logger.error(error_msg)
raise ValueError(error_msg)

@@ -115,33 +134,40 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
artist_data = get_spotify_info(artist_id, "artist_discography")

# Debug logging to inspect the structure of artist_data
logger.debug(f"Artist data structure has keys: {list(artist_data.keys() if isinstance(artist_data, dict) else [])}")
logger.debug(
f"Artist data structure has keys: {list(artist_data.keys() if isinstance(artist_data, dict) else [])}"
)

if not artist_data or 'items' not in artist_data:
raise ValueError(f"Failed to retrieve artist data or no albums found for artist ID {artist_id}")
if not artist_data or "items" not in artist_data:
raise ValueError(
f"Failed to retrieve artist data or no albums found for artist ID {artist_id}"
)

# Parse the album types to filter by
allowed_types = [t.strip().lower() for t in album_type.split(",")]
logger.info(f"Filtering albums by types: {allowed_types}")

# Get artist name from the first album
artist_name = ""
if artist_data.get('items') and len(artist_data['items']) > 0:
first_album = artist_data['items'][0]
if first_album.get('artists') and len(first_album['artists']) > 0:
artist_name = first_album['artists'][0].get('name', '')

# Filter albums by the specified types
filtered_albums = []
for album in artist_data.get('items', []):
album_type_value = album.get('album_type', '').lower()
album_group_value = album.get('album_group', '').lower()
for album in artist_data.get("items", []):
album_type_value = album.get("album_type", "").lower()
album_group_value = album.get("album_group", "").lower()

# Apply filtering logic based on album_type and album_group
if (('album' in allowed_types and album_type_value == 'album' and album_group_value == 'album') or
('single' in allowed_types and album_type_value == 'single' and album_group_value == 'single') or
('compilation' in allowed_types and album_type_value == 'compilation') or
('appears_on' in allowed_types and album_group_value == 'appears_on')):
if (
(
"album" in allowed_types
and album_type_value == "album"
and album_group_value == "album"
)
or (
"single" in allowed_types
and album_type_value == "single"
and album_group_value == "single"
)
or ("compilation" in allowed_types and album_type_value == "compilation")
or ("appears_on" in allowed_types and album_group_value == "appears_on")
):
filtered_albums.append(album)

if not filtered_albums:
@@ -158,14 +184,18 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
logger.debug(f"Processing album: {album.get('name', 'Unknown')}")
logger.debug(f"Album structure has keys: {list(album.keys())}")

external_urls = album.get('external_urls', {})
external_urls = album.get("external_urls", {})
logger.debug(f"Album external_urls: {external_urls}")

album_url = external_urls.get('spotify', '')
album_name = album.get('name', 'Unknown Album')
album_artists = album.get('artists', [])
album_artist = album_artists[0].get('name', 'Unknown Artist') if album_artists else 'Unknown Artist'
album_id = album.get('id')
album_url = external_urls.get("spotify", "")
album_name = album.get("name", "Unknown Album")
album_artists = album.get("artists", [])
album_artist = (
album_artists[0].get("name", "Unknown Artist")
if album_artists
else "Unknown Artist"
)
album_id = album.get("id")

logger.debug(f"Extracted album URL: {album_url}")
logger.debug(f"Extracted album ID: {album_id}")
@@ -182,11 +212,13 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
"type": "album",
# URL source will be automatically detected in the download functions
"parent_artist_url": url,
"parent_request_type": "artist"
"parent_request_type": "artist",
}

# Include original download URL for this album task
album_request_args["original_url"] = url_for('album.handle_download', album_id=album_id, _external=True)
album_request_args["original_url"] = url_for(
"album.handle_download", album_id=album_id, _external=True
)

# Create task for this album
task_data = {
@@ -196,11 +228,13 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
"retry_url": album_url, # Use album URL for retry logic, not artist URL
"name": album_name,
"artist": album_artist,
"orig_request": album_request_args # Store album-specific request params
"orig_request": album_request_args, # Store album-specific request params
}

# Debug log the task data being sent to the queue
logger.debug(f"Album task data: url={task_data['url']}, retry_url={task_data['retry_url']}")
logger.debug(
f"Album task data: url={task_data['url']}, retry_url={task_data['retry_url']}"
)

try:
task_id = download_queue_manager.add_task(task_data)
@@ -208,32 +242,50 @@ def download_artist_albums(url, album_type="album,single,compilation", request_a
# Check the status of the newly added task to see if it was marked as a duplicate error
last_status = get_last_task_status(task_id)

if last_status and last_status.get("status") == ProgressState.ERROR and last_status.get("existing_task_id"):
logger.warning(f"Album {album_name} (URL: {album_url}) is a duplicate. Error task ID: {task_id}. Existing task ID: {last_status.get('existing_task_id')}")
duplicate_albums.append({
if (
last_status
and last_status.get("status") == ProgressState.ERROR
and last_status.get("existing_task_id")
):
logger.warning(
f"Album {album_name} (URL: {album_url}) is a duplicate. Error task ID: {task_id}. Existing task ID: {last_status.get('existing_task_id')}"
)
duplicate_albums.append(
{
"name": album_name,
"artist": album_artist,
"url": album_url,
"error_task_id": task_id, # This is the ID of the task marked as a duplicate error
"existing_task_id": last_status.get("existing_task_id"),
"message": last_status.get("message", "Duplicate download attempt.")
})
"message": last_status.get(
"message", "Duplicate download attempt."
),
}
)
else:
# If not a duplicate error, it was successfully queued (or failed for other reasons handled by add_task)
# We only add to successfully_queued_albums if it wasn't a duplicate error from add_task
# Other errors from add_task (like submission failure) would also result in an error status for task_id
# but won't have 'existing_task_id'. The client can check the status of this task_id.
album_task_ids.append(task_id) # Keep track of all task_ids returned by add_task
successfully_queued_albums.append({
album_task_ids.append(
task_id
) # Keep track of all task_ids returned by add_task
successfully_queued_albums.append(
{
"name": album_name,
"artist": album_artist,
"url": album_url,
"task_id": task_id
})
"task_id": task_id,
}
)
logger.info(f"Queued album download: {album_name} ({task_id})")
except Exception as e: # Catch any other unexpected error from add_task itself (though it should be rare now)
logger.error(f"Failed to queue album {album_name} due to an unexpected error in add_task: {str(e)}")
logger.error(
f"Failed to queue album {album_name} due to an unexpected error in add_task: {str(e)}"
)
# Optionally, collect these errors. For now, just logging and continuing.

logger.info(f"Artist album processing: {len(successfully_queued_albums)} queued, {len(duplicate_albums)} duplicates found.")
logger.info(
f"Artist album processing: {len(successfully_queued_albums)} queued, {len(duplicate_albums)} duplicates found."
)
return successfully_queued_albums, duplicate_albums
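A sketch of consuming the (successfully_queued_albums, duplicate_albums) pair returned above (illustrative; the artist URL is a placeholder, and the dictionary keys match the structures built in the loop):

queued, duplicates = download_artist_albums(
    url="https://open.spotify.com/artist/0TnOYISbd1XYRBk9myaseg",
    album_type="album,single",
)
for album in queued:
    print(f"queued {album['name']} as task {album['task_id']}")
for dup in duplicates:
    print(f"skipped duplicate {dup['name']}; already tracked by {dup['existing_task_id']}")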
@@ -7,43 +7,46 @@ from pathlib import Path
logger = logging.getLogger(__name__)

# Redis configuration - read from environment variables
REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
REDIS_PORT = os.getenv('REDIS_PORT', '6379')
REDIS_DB = os.getenv('REDIS_DB', '0')
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = os.getenv("REDIS_PORT", "6379")
REDIS_DB = os.getenv("REDIS_DB", "0")
# Optional Redis password
REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', '')
REDIS_PASSWORD = os.getenv("REDIS_PASSWORD", "")
# Build default URL with password if provided
_password_part = f":{REDIS_PASSWORD}@" if REDIS_PASSWORD else ""
default_redis_url = f"redis://{_password_part}{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}"
REDIS_URL = os.getenv('REDIS_URL', default_redis_url)
REDIS_BACKEND = os.getenv('REDIS_BACKEND', REDIS_URL)
REDIS_URL = os.getenv("REDIS_URL", default_redis_url)
REDIS_BACKEND = os.getenv("REDIS_BACKEND", REDIS_URL)

# Log Redis connection details
logger.info(f"Redis configuration: REDIS_URL={REDIS_URL}, REDIS_BACKEND={REDIS_BACKEND}")
logger.info(
f"Redis configuration: REDIS_URL={REDIS_URL}, REDIS_BACKEND={REDIS_BACKEND}"
)

# Config path
CONFIG_FILE_PATH = Path('./data/config/main.json')
CONFIG_FILE_PATH = Path("./data/config/main.json")

DEFAULT_MAIN_CONFIG = {
'service': 'spotify',
'spotify': '',
'deezer': '',
'fallback': False,
'spotifyQuality': 'NORMAL',
'deezerQuality': 'MP3_128',
'realTime': False,
'customDirFormat': '%ar_album%/%album%',
'customTrackFormat': '%tracknum%. %music%',
'tracknum_padding': True,
'save_cover': True,
'maxConcurrentDownloads': 3,
'maxRetries': 3,
'retryDelaySeconds': 5,
'retry_delay_increase': 5,
'convertTo': None,
'bitrate': None
"service": "spotify",
"spotify": "",
"deezer": "",
"fallback": False,
"spotifyQuality": "NORMAL",
"deezerQuality": "MP3_128",
"realTime": False,
"customDirFormat": "%ar_album%/%album%",
"customTrackFormat": "%tracknum%. %music%",
"tracknum_padding": True,
"save_cover": True,
"maxConcurrentDownloads": 3,
"maxRetries": 3,
"retryDelaySeconds": 5,
"retry_delay_increase": 5,
"convertTo": None,
"bitrate": None,
}

def get_config_params():
"""
Get configuration parameters from the config file.
@@ -59,11 +62,11 @@ def get_config_params():

if not CONFIG_FILE_PATH.exists():
logger.info(f"{CONFIG_FILE_PATH} not found. Creating with default values.")
with open(CONFIG_FILE_PATH, 'w') as f:
with open(CONFIG_FILE_PATH, "w") as f:
json.dump(DEFAULT_MAIN_CONFIG, f, indent=4)
return DEFAULT_MAIN_CONFIG.copy() # Return a copy of defaults

with open(CONFIG_FILE_PATH, 'r') as f:
with open(CONFIG_FILE_PATH, "r") as f:
config = json.load(f)

# Ensure all default keys are present in the loaded config
@@ -74,48 +77,54 @@ def get_config_params():
updated = True

if updated:
logger.info(f"Configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults.")
with open(CONFIG_FILE_PATH, 'w') as f:
logger.info(
f"Configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults."
)
with open(CONFIG_FILE_PATH, "w") as f:
json.dump(config, f, indent=4)

return config
except Exception as e:
logger.error(f"Error reading or creating config at {CONFIG_FILE_PATH}: {e}", exc_info=True)
logger.error(
f"Error reading or creating config at {CONFIG_FILE_PATH}: {e}",
exc_info=True,
)
# Return defaults if config read/create fails
return DEFAULT_MAIN_CONFIG.copy()

# Load configuration values we need for Celery
config_params_values = get_config_params() # Renamed to avoid conflict with module name
MAX_CONCURRENT_DL = config_params_values.get('maxConcurrentDownloads', 3)
MAX_RETRIES = config_params_values.get('maxRetries', 3)
RETRY_DELAY = config_params_values.get('retryDelaySeconds', 5)
RETRY_DELAY_INCREASE = config_params_values.get('retry_delay_increase', 5)
MAX_CONCURRENT_DL = config_params_values.get("maxConcurrentDownloads", 3)
MAX_RETRIES = config_params_values.get("maxRetries", 3)
RETRY_DELAY = config_params_values.get("retryDelaySeconds", 5)
RETRY_DELAY_INCREASE = config_params_values.get("retry_delay_increase", 5)

# Define task queues
task_queues = {
'default': {
'exchange': 'default',
'routing_key': 'default',
"default": {
"exchange": "default",
"routing_key": "default",
},
'downloads': {
'exchange': 'downloads',
'routing_key': 'downloads',
"downloads": {
"exchange": "downloads",
"routing_key": "downloads",
},
"utility_tasks": {
"exchange": "utility_tasks",
"routing_key": "utility_tasks",
},
'utility_tasks': {
'exchange': 'utility_tasks',
'routing_key': 'utility_tasks',
}
}

# Set default queue
task_default_queue = 'downloads'
task_default_exchange = 'downloads'
task_default_routing_key = 'downloads'
task_default_queue = "downloads"
task_default_exchange = "downloads"
task_default_routing_key = "downloads"

# Celery task settings
task_serializer = 'json'
accept_content = ['json']
result_serializer = 'json'
task_serializer = "json"
accept_content = ["json"]
result_serializer = "json"
enable_utc = True

# Configure worker concurrency based on MAX_CONCURRENT_DL
@@ -123,15 +132,15 @@ worker_concurrency = MAX_CONCURRENT_DL

# Configure task rate limiting - these are per-minute limits
task_annotations = {
'routes.utils.celery_tasks.download_track': {
'rate_limit': f'{MAX_CONCURRENT_DL}/m',
"routes.utils.celery_tasks.download_track": {
"rate_limit": f"{MAX_CONCURRENT_DL}/m",
},
'routes.utils.celery_tasks.download_album': {
'rate_limit': f'{MAX_CONCURRENT_DL}/m',
"routes.utils.celery_tasks.download_album": {
"rate_limit": f"{MAX_CONCURRENT_DL}/m",
},
"routes.utils.celery_tasks.download_playlist": {
"rate_limit": f"{MAX_CONCURRENT_DL}/m",
},
'routes.utils.celery_tasks.download_playlist': {
'rate_limit': f'{MAX_CONCURRENT_DL}/m',
}
}

# Configure retry settings
@@ -144,10 +153,10 @@ result_expires = 60 * 60 * 24 * 7 # 7 days

# Configure visibility timeout for task messages
broker_transport_options = {
'visibility_timeout': 3600, # 1 hour
'fanout_prefix': True,
'fanout_patterns': True,
'priority_steps': [0, 3, 6, 9],
"visibility_timeout": 3600, # 1 hour
"fanout_prefix": True,
"fanout_patterns": True,
"priority_steps": [0, 3, 6, 9],
}

# Important broker connection settings
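For context, these module-level names are what Celery picks up via config_from_object; a minimal wiring sketch (the app name is a placeholder, while the import path matches the imports used elsewhere in this PR):

from celery import Celery
from routes.utils.celery_config import REDIS_URL, REDIS_BACKEND

celery_app = Celery("spotizerr_tasks", broker=REDIS_URL, backend=REDIS_BACKEND)  # app name is illustrative
celery_app.config_from_object("routes.utils.celery_config")  # reads task_queues, task_annotations, etc.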
@@ -1,47 +1,27 @@
import os
import json
import signal
import subprocess
import logging
import time
import atexit
from pathlib import Path
import threading
import queue
import sys
import uuid

# Import Celery task utilities
from .celery_tasks import (
ProgressState,
get_task_info,
get_last_task_status,
store_task_status,
get_all_tasks as get_all_celery_tasks_info,
cleanup_stale_errors,
delayed_delete_task_data
)
from .celery_config import get_config_params, MAX_CONCURRENT_DL
# Import history manager
from .history_manager import init_history_db
# Import credentials manager for DB init
from .credentials import init_credentials_db

# Configure logging
logger = logging.getLogger(__name__)

# Configuration
CONFIG_PATH = './data/config/main.json'
CELERY_APP = 'routes.utils.celery_tasks.celery_app'
CONFIG_PATH = "./data/config/main.json"
CELERY_APP = "routes.utils.celery_tasks.celery_app"
CELERY_PROCESS = None
CONFIG_CHECK_INTERVAL = 30 # seconds

class CeleryManager:
"""
Manages Celery workers dynamically based on configuration changes.
"""

def __init__(self, app_name="download_tasks"):
def __init__(self, app_name="routes.utils.celery_tasks"):
self.app_name = app_name
self.download_worker_process = None
self.utility_worker_process = None
@@ -52,22 +32,31 @@ class CeleryManager:
self.stop_event = threading.Event()
self.config_monitor_thread = None
# self.concurrency now specifically refers to download worker concurrency
self.concurrency = get_config_params().get('maxConcurrentDownloads', MAX_CONCURRENT_DL)
logger.info(f"CeleryManager initialized. Download concurrency set to: {self.concurrency}")
self.concurrency = get_config_params().get(
"maxConcurrentDownloads", MAX_CONCURRENT_DL
)
logger.info(
f"CeleryManager initialized. Download concurrency set to: {self.concurrency}"
)

def _get_worker_command(self, queues, concurrency, worker_name_suffix, log_level="INFO"):
def _get_worker_command(
self, queues, concurrency, worker_name_suffix, log_level="INFO"
):
# Use a unique worker name to avoid conflicts.
# %h is replaced by celery with the actual hostname.
hostname = f"worker_{worker_name_suffix}@%h"
command = [
"celery",
"-A", self.app_name,
"-A",
self.app_name,
"worker",
"--loglevel=" + log_level,
"-Q", queues,
"-c", str(concurrency),
"-Q",
queues,
"-c",
str(concurrency),
"--hostname=" + hostname,
"--pool=prefork"
"--pool=prefork",
]
# Optionally add --without-gossip, --without-mingle, --without-heartbeat
# if experiencing issues or to reduce network load, but defaults are usually fine.
@@ -78,22 +67,31 @@ class CeleryManager:
def _process_output_reader(self, stream, log_prefix, error=False):
logger.debug(f"Log reader thread started for {log_prefix}")
try:
for line in iter(stream.readline, ''):
for line in iter(stream.readline, ""):
if line:
log_method = logger.error if error else logger.info
log_method(f"{log_prefix}: {line.strip()}")
elif self.stop_event.is_set(): # If empty line and stop is set, likely EOF
elif (
self.stop_event.is_set()
): # If empty line and stop is set, likely EOF
break
# Loop may also exit if stream is closed by process termination
except ValueError: # ValueError: I/O operation on closed file
if not self.stop_event.is_set():
logger.error(f"Error reading Celery output from {log_prefix} (ValueError - stream closed unexpectedly?)", exc_info=False) # Don't print full trace for common close error
logger.error(
f"Error reading Celery output from {log_prefix} (ValueError - stream closed unexpectedly?)",
exc_info=False,
) # Don't print full trace for common close error
else:
logger.info(f"{log_prefix} stream reader gracefully stopped due to closed stream after stop signal.")
logger.info(
f"{log_prefix} stream reader gracefully stopped due to closed stream after stop signal."
)
except Exception as e:
logger.error(f"Unexpected error in log reader for {log_prefix}: {e}", exc_info=True)
logger.error(
f"Unexpected error in log reader for {log_prefix}: {e}", exc_info=True
)
finally:
if hasattr(stream, 'close') and not stream.closed:
if hasattr(stream, "close") and not stream.closed:
stream.close()
logger.info(f"{log_prefix} stream reader thread finished.")

@@ -104,21 +102,38 @@ class CeleryManager:
if self.download_worker_process and self.download_worker_process.poll() is None:
logger.info("Celery Download Worker is already running.")
else:
self.concurrency = get_config_params().get('maxConcurrentDownloads', self.concurrency)
self.concurrency = get_config_params().get(
"maxConcurrentDownloads", self.concurrency
)
download_cmd = self._get_worker_command(
queues="downloads",
concurrency=self.concurrency,
worker_name_suffix="dlw" # Download Worker
worker_name_suffix="dlw", # Download Worker
)
logger.info(
f"Starting Celery Download Worker with command: {' '.join(download_cmd)}"
)
logger.info(f"Starting Celery Download Worker with command: {' '.join(download_cmd)}")
self.download_worker_process = subprocess.Popen(
download_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1, universal_newlines=True
download_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
bufsize=1,
universal_newlines=True,
)
self.download_log_thread_stdout = threading.Thread(
target=self._process_output_reader,
args=(self.download_worker_process.stdout, "Celery[DW-STDOUT]"),
)
self.download_log_thread_stderr = threading.Thread(
target=self._process_output_reader,
args=(self.download_worker_process.stderr, "Celery[DW-STDERR]", True),
)
self.download_log_thread_stdout = threading.Thread(target=self._process_output_reader, args=(self.download_worker_process.stdout, "Celery[DW-STDOUT]"))
self.download_log_thread_stderr = threading.Thread(target=self._process_output_reader, args=(self.download_worker_process.stderr, "Celery[DW-STDERR]", True))
self.download_log_thread_stdout.start()
self.download_log_thread_stderr.start()
logger.info(f"Celery Download Worker (PID: {self.download_worker_process.pid}) started with concurrency {self.concurrency}.")
logger.info(
f"Celery Download Worker (PID: {self.download_worker_process.pid}) started with concurrency {self.concurrency}."
)

# Start Utility Worker
if self.utility_worker_process and self.utility_worker_process.poll() is None:
@@ -127,86 +142,160 @@ class CeleryManager:
utility_cmd = self._get_worker_command(
queues="utility_tasks,default", # Listen to utility and default
concurrency=3,
worker_name_suffix="utw" # Utility Worker
worker_name_suffix="utw", # Utility Worker
)
logger.info(
f"Starting Celery Utility Worker with command: {' '.join(utility_cmd)}"
)
logger.info(f"Starting Celery Utility Worker with command: {' '.join(utility_cmd)}")
self.utility_worker_process = subprocess.Popen(
utility_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1, universal_newlines=True
utility_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
bufsize=1,
universal_newlines=True,
)
self.utility_log_thread_stdout = threading.Thread(
target=self._process_output_reader,
args=(self.utility_worker_process.stdout, "Celery[UW-STDOUT]"),
)
self.utility_log_thread_stderr = threading.Thread(
target=self._process_output_reader,
args=(self.utility_worker_process.stderr, "Celery[UW-STDERR]", True),
)
self.utility_log_thread_stdout = threading.Thread(target=self._process_output_reader, args=(self.utility_worker_process.stdout, "Celery[UW-STDOUT]"))
self.utility_log_thread_stderr = threading.Thread(target=self._process_output_reader, args=(self.utility_worker_process.stderr, "Celery[UW-STDERR]", True))
self.utility_log_thread_stdout.start()
self.utility_log_thread_stderr.start()
logger.info(f"Celery Utility Worker (PID: {self.utility_worker_process.pid}) started with concurrency 3.")
logger.info(
f"Celery Utility Worker (PID: {self.utility_worker_process.pid}) started with concurrency 3."
)

if self.config_monitor_thread is None or not self.config_monitor_thread.is_alive():
self.config_monitor_thread = threading.Thread(target=self._monitor_config_changes)
self.config_monitor_thread.daemon = True # Allow main program to exit even if this thread is running
if (
self.config_monitor_thread is None
or not self.config_monitor_thread.is_alive()
):
self.config_monitor_thread = threading.Thread(
target=self._monitor_config_changes
)
self.config_monitor_thread.daemon = (
True # Allow main program to exit even if this thread is running
)
self.config_monitor_thread.start()
logger.info("CeleryManager: Config monitor thread started.")
else:
logger.info("CeleryManager: Config monitor thread already running.")

def _monitor_config_changes(self):
logger.info("CeleryManager: Config monitor thread active, monitoring configuration changes...")
logger.info(
"CeleryManager: Config monitor thread active, monitoring configuration changes..."
)
while not self.stop_event.is_set():
try:
time.sleep(10) # Check every 10 seconds
if self.stop_event.is_set(): break
if self.stop_event.is_set():
break

current_config = get_config_params()
new_max_concurrent_downloads = current_config.get('maxConcurrentDownloads', self.concurrency)
new_max_concurrent_downloads = current_config.get(
"maxConcurrentDownloads", self.concurrency
)

if new_max_concurrent_downloads != self.concurrency:
logger.info(f"CeleryManager: Detected change in maxConcurrentDownloads from {self.concurrency} to {new_max_concurrent_downloads}. Restarting download worker only.")
logger.info(
f"CeleryManager: Detected change in maxConcurrentDownloads from {self.concurrency} to {new_max_concurrent_downloads}. Restarting download worker only."
)

# Stop only the download worker
if self.download_worker_process and self.download_worker_process.poll() is None:
logger.info(f"Stopping Celery Download Worker (PID: {self.download_worker_process.pid}) for config update...")
if (
self.download_worker_process
and self.download_worker_process.poll() is None
):
logger.info(
f"Stopping Celery Download Worker (PID: {self.download_worker_process.pid}) for config update..."
)
self.download_worker_process.terminate()
try:
self.download_worker_process.wait(timeout=10)
logger.info(f"Celery Download Worker (PID: {self.download_worker_process.pid}) terminated.")
logger.info(
f"Celery Download Worker (PID: {self.download_worker_process.pid}) terminated."
)
except subprocess.TimeoutExpired:
logger.warning(f"Celery Download Worker (PID: {self.download_worker_process.pid}) did not terminate gracefully, killing.")
logger.warning(
f"Celery Download Worker (PID: {self.download_worker_process.pid}) did not terminate gracefully, killing."
)
self.download_worker_process.kill()
self.download_worker_process = None

# Wait for log threads of download worker to finish
if self.download_log_thread_stdout and self.download_log_thread_stdout.is_alive():
if (
self.download_log_thread_stdout
and self.download_log_thread_stdout.is_alive()
):
self.download_log_thread_stdout.join(timeout=5)
if self.download_log_thread_stderr and self.download_log_thread_stderr.is_alive():
if (
self.download_log_thread_stderr
and self.download_log_thread_stderr.is_alive()
):
self.download_log_thread_stderr.join(timeout=5)

self.concurrency = new_max_concurrent_downloads

# Restart only the download worker
download_cmd = self._get_worker_command("downloads", self.concurrency, "dlw")
logger.info(f"Restarting Celery Download Worker with command: {' '.join(download_cmd)}")
self.download_worker_process = subprocess.Popen(
download_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1, universal_newlines=True
download_cmd = self._get_worker_command(
"downloads", self.concurrency, "dlw"
)
logger.info(
f"Restarting Celery Download Worker with command: {' '.join(download_cmd)}"
)
self.download_worker_process = subprocess.Popen(
download_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
bufsize=1,
universal_newlines=True,
)
self.download_log_thread_stdout = threading.Thread(
target=self._process_output_reader,
args=(self.download_worker_process.stdout, "Celery[DW-STDOUT]"),
)
self.download_log_thread_stderr = threading.Thread(
target=self._process_output_reader,
args=(
self.download_worker_process.stderr,
"Celery[DW-STDERR]",
True,
),
)
self.download_log_thread_stdout = threading.Thread(target=self._process_output_reader, args=(self.download_worker_process.stdout, "Celery[DW-STDOUT]"))
self.download_log_thread_stderr = threading.Thread(target=self._process_output_reader, args=(self.download_worker_process.stderr, "Celery[DW-STDERR]", True))
self.download_log_thread_stdout.start()
self.download_log_thread_stderr.start()
logger.info(f"Celery Download Worker (PID: {self.download_worker_process.pid}) restarted with new concurrency {self.concurrency}.")
logger.info(
f"Celery Download Worker (PID: {self.download_worker_process.pid}) restarted with new concurrency {self.concurrency}."
)

except Exception as e:
logger.error(f"CeleryManager: Error in config monitor thread: {e}", exc_info=True)
logger.error(
f"CeleryManager: Error in config monitor thread: {e}", exc_info=True
)
# Avoid busy-looping on continuous errors
if not self.stop_event.is_set(): time.sleep(30)
if not self.stop_event.is_set():
time.sleep(30)
logger.info("CeleryManager: Config monitor thread stopped.")

def _stop_worker_process(self, worker_process, worker_name):
if worker_process and worker_process.poll() is None:
logger.info(f"Terminating Celery {worker_name} Worker (PID: {worker_process.pid})...")
logger.info(
f"Terminating Celery {worker_name} Worker (PID: {worker_process.pid})..."
)
worker_process.terminate()
try:
worker_process.wait(timeout=10)
logger.info(f"Celery {worker_name} Worker (PID: {worker_process.pid}) terminated.")
logger.info(
f"Celery {worker_name} Worker (PID: {worker_process.pid}) terminated."
)
except subprocess.TimeoutExpired:
logger.warning(f"Celery {worker_name} Worker (PID: {worker_process.pid}) did not terminate gracefully, killing.")
logger.warning(
f"Celery {worker_name} Worker (PID: {worker_process.pid}) did not terminate gracefully, killing."
)
worker_process.kill()
return None # Set process to None after stopping

@@ -215,18 +304,28 @@ class CeleryManager:
self.stop_event.set() # Signal all threads to stop

# Stop download worker
self.download_worker_process = self._stop_worker_process(self.download_worker_process, "Download")
self.download_worker_process = self._stop_worker_process(
self.download_worker_process, "Download"
)

# Stop utility worker
self.utility_worker_process = self._stop_worker_process(self.utility_worker_process, "Utility")
self.utility_worker_process = self._stop_worker_process(
self.utility_worker_process, "Utility"
)

logger.info("Joining log threads...")
thread_timeout = 5 # seconds to wait for log threads

# Join download worker log threads
if self.download_log_thread_stdout and self.download_log_thread_stdout.is_alive():
if (
self.download_log_thread_stdout
and self.download_log_thread_stdout.is_alive()
):
self.download_log_thread_stdout.join(timeout=thread_timeout)
if self.download_log_thread_stderr and self.download_log_thread_stderr.is_alive():
if (
self.download_log_thread_stderr
and self.download_log_thread_stderr.is_alive()
):
self.download_log_thread_stderr.join(timeout=thread_timeout)

# Join utility worker log threads
@@ -239,7 +338,9 @@ class CeleryManager:
logger.info("Joining config_monitor_thread...")
self.config_monitor_thread.join(timeout=thread_timeout)

logger.info("CeleryManager: All workers and threads signaled to stop and joined.")
logger.info(
"CeleryManager: All workers and threads signaled to stop and joined."
)

def restart(self):
logger.info("CeleryManager: Restarting all Celery workers...")
@@ -250,12 +351,16 @@ class CeleryManager:
self.start()
logger.info("CeleryManager: All Celery workers restarted.")

# Global instance for managing Celery workers
celery_manager = CeleryManager()

# Example of how to use the manager (typically called from your main app script)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] [%(threadName)s] [%(name)s] - %(message)s')
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] [%(threadName)s] [%(name)s] - %(message)s",
)
logger.info("Starting Celery Manager example...")
celery_manager.start()
try:
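The example block is truncated at the try: above; a plausible completion (an assumption, not part of the diff) keeps the process alive until interrupted and then shuts the workers down:

try:
    while True:
        time.sleep(60)  # idle loop; the workers run in their own subprocesses
except KeyboardInterrupt:
    logger.info("Stopping Celery Manager example...")
    celery_manager.stop()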
@@ -1,33 +1,29 @@
import os
import json
import time
import uuid
import logging
from datetime import datetime

from routes.utils.celery_tasks import (
celery_app,
download_track,
download_album,
download_playlist,
store_task_status,
store_task_info,
get_task_info,
get_task_status,
get_last_task_status,
cancel_task as cancel_celery_task,
retry_task as retry_celery_task,
get_all_tasks,
ProgressState
ProgressState,
)

# Configure logging
logger = logging.getLogger(__name__)

# Load configuration
CONFIG_PATH = './data/config/main.json'
CONFIG_PATH = "./data/config/main.json"
try:
with open(CONFIG_PATH, 'r') as f:
with open(CONFIG_PATH, "r") as f:
config_data = json.load(f)
MAX_CONCURRENT_DL = config_data.get("maxConcurrentDownloads", 3)
except Exception as e:
@@ -35,6 +31,7 @@ except Exception as e:
# Fallback default
MAX_CONCURRENT_DL = 3

def get_config_params():
"""
Get common download parameters from the config file.
@@ -44,47 +41,48 @@ def get_config_params():
dict: A dictionary containing common parameters from config
"""
try:
with open(CONFIG_PATH, 'r') as f:
with open(CONFIG_PATH, "r") as f:
config = json.load(f)

return {
'spotify': config.get('spotify', ''),
'deezer': config.get('deezer', ''),
'fallback': config.get('fallback', False),
'spotifyQuality': config.get('spotifyQuality', 'NORMAL'),
'deezerQuality': config.get('deezerQuality', 'MP3_128'),
'realTime': config.get('realTime', False),
'customDirFormat': config.get('customDirFormat', '%ar_album%/%album%'),
'customTrackFormat': config.get('customTrackFormat', '%tracknum%. %music%'),
'tracknum_padding': config.get('tracknum_padding', True),
'save_cover': config.get('save_cover', True),
'maxRetries': config.get('maxRetries', 3),
'retryDelaySeconds': config.get('retryDelaySeconds', 5),
'retry_delay_increase': config.get('retry_delay_increase', 5),
'convertTo': config.get('convertTo', None),
'bitrate': config.get('bitrate', None)
"spotify": config.get("spotify", ""),
"deezer": config.get("deezer", ""),
"fallback": config.get("fallback", False),
"spotifyQuality": config.get("spotifyQuality", "NORMAL"),
"deezerQuality": config.get("deezerQuality", "MP3_128"),
"realTime": config.get("realTime", False),
"customDirFormat": config.get("customDirFormat", "%ar_album%/%album%"),
"customTrackFormat": config.get("customTrackFormat", "%tracknum%. %music%"),
"tracknum_padding": config.get("tracknum_padding", True),
"save_cover": config.get("save_cover", True),
"maxRetries": config.get("maxRetries", 3),
"retryDelaySeconds": config.get("retryDelaySeconds", 5),
"retry_delay_increase": config.get("retry_delay_increase", 5),
"convertTo": config.get("convertTo", None),
"bitrate": config.get("bitrate", None),
}
except Exception as e:
logger.error(f"Error reading config for parameters: {e}")
# Return defaults if config read fails
return {
'spotify': '',
'deezer': '',
'fallback': False,
'spotifyQuality': 'NORMAL',
'deezerQuality': 'MP3_128',
'realTime': False,
|
||||
'customDirFormat': '%ar_album%/%album%',
|
||||
'customTrackFormat': '%tracknum%. %music%',
|
||||
'tracknum_padding': True,
|
||||
'save_cover': True,
|
||||
'maxRetries': 3,
|
||||
'retryDelaySeconds': 5,
|
||||
'retry_delay_increase': 5,
|
||||
'convertTo': None, # Default for conversion
|
||||
'bitrate': None # Default for bitrate
|
||||
"spotify": "",
|
||||
"deezer": "",
|
||||
"fallback": False,
|
||||
"spotifyQuality": "NORMAL",
|
||||
"deezerQuality": "MP3_128",
|
||||
"realTime": False,
|
||||
"customDirFormat": "%ar_album%/%album%",
|
||||
"customTrackFormat": "%tracknum%. %music%",
|
||||
"tracknum_padding": True,
|
||||
"save_cover": True,
|
||||
"maxRetries": 3,
|
||||
"retryDelaySeconds": 5,
|
||||
"retry_delay_increase": 5,
|
||||
"convertTo": None, # Default for conversion
|
||||
"bitrate": None, # Default for bitrate
|
||||
}
|
||||
|
||||
|
||||
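
Both branches of get_config_params() return the same key set, so callers can index the result without guarding against a missing or unreadable main.json. A minimal sketch of that read-or-defaults shape, using a hypothetical two-key config:

import json

DEFAULTS = {"maxRetries": 3, "retryDelaySeconds": 5}  # hypothetical subset

def read_config(path: str) -> dict:
    """Merge the on-disk config over defaults; fall back entirely on any error."""
    try:
        with open(path, "r") as f:
            loaded = json.load(f)
        # Missing keys take the default; unknown keys are ignored.
        return {key: loaded.get(key, default) for key, default in DEFAULTS.items()}
    except Exception:
        return dict(DEFAULTS)

print(read_config("./data/config/main.json"))  # pure defaults if the file is absent
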
class CeleryDownloadQueueManager:
"""
Manages a queue of download tasks using Celery.
@@ -98,7 +96,9 @@ class CeleryDownloadQueueManager:
"""Initialize the Celery-based download queue manager"""
self.max_concurrent = MAX_CONCURRENT_DL
self.paused = False
print(f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}")
print(
f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}"
)

def add_task(self, task: dict, from_watch_job: bool = False):
"""
@@ -121,14 +121,16 @@ class CeleryDownloadQueueManager:
incoming_type = task.get("download_type", "unknown")

if not incoming_url:
logger.warning("Task being added with no URL. Duplicate check might be unreliable.")
logger.warning(
"Task being added with no URL. Duplicate check might be unreliable."
)

NON_BLOCKING_STATES = [
ProgressState.COMPLETE,
ProgressState.CANCELLED,
ProgressState.ERROR,
ProgressState.ERROR_RETRIED,
ProgressState.ERROR_AUTO_CLEANED
ProgressState.ERROR_AUTO_CLEANED,
]

all_existing_tasks_summary = get_all_tasks()
@@ -148,15 +150,18 @@ class CeleryDownloadQueueManager:
existing_type = existing_task_info.get("download_type")
existing_status = existing_last_status_obj.get("status")

if (existing_url == incoming_url and
existing_type == incoming_type and
existing_status not in NON_BLOCKING_STATES):

if (
existing_url == incoming_url
and existing_type == incoming_type
and existing_status not in NON_BLOCKING_STATES
):
message = f"Duplicate download: URL '{incoming_url}' (type: {incoming_type}) is already being processed by task {existing_task_id} (status: {existing_status})."
logger.warning(message)

if from_watch_job:
logger.info(f"Task from watch job for {incoming_url} not queued due to active duplicate {existing_task_id}.")
logger.info(
f"Task from watch job for {incoming_url} not queued due to active duplicate {existing_task_id}."
)
return None # Skip execution for watch jobs
else:
# Create a new task_id for this duplicate request and mark it as an error
@@ -167,9 +172,11 @@ class CeleryDownloadQueueManager:
"name": task.get("name", "Duplicate Task"),
"artist": task.get("artist", ""),
"url": incoming_url,
"original_request": task.get("orig_request", task.get("original_request", {})),
"original_request": task.get(
"orig_request", task.get("original_request", {})
),
"created_at": time.time(),
"is_duplicate_error_task": True
"is_duplicate_error_task": True,
}
store_task_info(error_task_id, error_task_info_payload)
error_status_payload = {
@@ -179,14 +186,16 @@ class CeleryDownloadQueueManager:
"timestamp": time.time(),
"type": error_task_info_payload["type"],
"name": error_task_info_payload["name"],
"artist": error_task_info_payload["artist"]
"artist": error_task_info_payload["artist"],
}
store_task_status(error_task_id, error_status_payload)
return error_task_id # Return the ID of this new error-state task

task_id = str(uuid.uuid4())
config_params = get_config_params()
original_request = task.get("orig_request", task.get("original_request", {}))
original_request = task.get(
"orig_request", task.get("original_request", {})
)

complete_task = {
"download_type": incoming_type,
@@ -195,21 +204,42 @@ class CeleryDownloadQueueManager:
"artist": task.get("artist", ""),
"url": task.get("url", ""),
"retry_url": task.get("retry_url", ""),
"main": original_request.get("main", config_params['deezer']),
"fallback": original_request.get("fallback",
config_params['spotify'] if config_params['fallback'] else None),
"quality": original_request.get("quality", config_params['deezerQuality']),
"fall_quality": original_request.get("fall_quality", config_params['spotifyQuality']),
"real_time": self._parse_bool_param(original_request.get("real_time"), config_params['realTime']),
"custom_dir_format": original_request.get("custom_dir_format", config_params['customDirFormat']),
"custom_track_format": original_request.get("custom_track_format", config_params['customTrackFormat']),
"pad_tracks": self._parse_bool_param(original_request.get("tracknum_padding"), config_params['tracknum_padding']),
"save_cover": self._parse_bool_param(original_request.get("save_cover"), config_params['save_cover']),
"convertTo": original_request.get("convertTo", config_params.get('convertTo')),
"bitrate": original_request.get("bitrate", config_params.get('bitrate')),
"main": original_request.get("main", config_params["deezer"]),
"fallback": original_request.get(
"fallback",
config_params["spotify"] if config_params["fallback"] else None,
),
"quality": original_request.get(
"quality", config_params["deezerQuality"]
),
"fall_quality": original_request.get(
"fall_quality", config_params["spotifyQuality"]
),
"real_time": self._parse_bool_param(
original_request.get("real_time"), config_params["realTime"]
),
"custom_dir_format": original_request.get(
"custom_dir_format", config_params["customDirFormat"]
),
"custom_track_format": original_request.get(
"custom_track_format", config_params["customTrackFormat"]
),
"pad_tracks": self._parse_bool_param(
original_request.get("tracknum_padding"),
config_params["tracknum_padding"],
),
"save_cover": self._parse_bool_param(
original_request.get("save_cover"), config_params["save_cover"]
),
"convertTo": original_request.get(
"convertTo", config_params.get("convertTo")
),
"bitrate": original_request.get(
"bitrate", config_params.get("bitrate")
),
"retry_count": 0,
"original_request": original_request,
"created_at": time.time()
"created_at": time.time(),
}

# If from_watch_job is True, ensure track_details_for_db is passed through
@@ -217,20 +247,23 @@ class CeleryDownloadQueueManager:
complete_task["track_details_for_db"] = task["track_details_for_db"]

store_task_info(task_id, complete_task)
store_task_status(task_id, {
store_task_status(
task_id,
{
"status": ProgressState.QUEUED,
"timestamp": time.time(),
"type": complete_task["type"],
"name": complete_task["name"],
"artist": complete_task["artist"],
"retry_count": 0,
"queue_position": len(get_all_tasks()) + 1
})
"queue_position": len(get_all_tasks()) + 1,
},
)

celery_task_map = {
"track": download_track,
"album": download_album,
"playlist": download_playlist
"playlist": download_playlist,
}

task_func = celery_task_map.get(incoming_type)
@@ -238,30 +271,38 @@ class CeleryDownloadQueueManager:
task_func.apply_async(
kwargs=complete_task,
task_id=task_id,
countdown=0 if not self.paused else 3600
countdown=0 if not self.paused else 3600,
)
logger.info(
f"Added {incoming_type} download task {task_id} to Celery queue."
)
logger.info(f"Added {incoming_type} download task {task_id} to Celery queue.")
return task_id
else:
store_task_status(task_id, {
store_task_status(
task_id,
{
"status": ProgressState.ERROR,
"message": f"Unsupported download type: {incoming_type}",
"timestamp": time.time()
})
"timestamp": time.time(),
},
)
logger.error(f"Unsupported download type: {incoming_type}")
return task_id

except Exception as e:
logger.error(f"Error adding task to Celery queue: {e}", exc_info=True)
error_task_id = str(uuid.uuid4())
store_task_status(error_task_id, {
store_task_status(
error_task_id,
{
"status": ProgressState.ERROR,
"message": f"Error adding task to queue: {str(e)}",
"timestamp": time.time(),
"type": task.get("type", "unknown"),
"name": task.get("name", "Unknown"),
"artist": task.get("artist", "")
})
"artist": task.get("artist", ""),
},
)
return error_task_id

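
Dispatch in add_task is table-driven: the download type string selects the Celery task from celery_task_map, and apply_async(..., task_id=task_id) makes Celery reuse the manager-generated UUID instead of minting its own, so the Redis status records and the Celery task share one identifier; a paused queue is approximated with countdown=3600, i.e. the task is enqueued but held back. A hedged sketch of the lookup-and-dispatch idea without a broker:

import uuid

def download_track(**task):
    print("downloading track", task["url"])

def download_album(**task):
    print("downloading album", task["url"])

TASK_MAP = {"track": download_track, "album": download_album}

def dispatch(task: dict) -> str:
    task_id = str(uuid.uuid4())  # caller-chosen id, mirroring task_id=task_id above
    func = TASK_MAP.get(task.get("download_type"))
    if func is None:
        raise ValueError(f"Unsupported download type: {task.get('download_type')}")
    func(**task)  # with Celery: func.apply_async(kwargs=task, task_id=task_id)
    return task_id

print(dispatch({"download_type": "track", "url": "https://example.com/t/1"}))
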
def _parse_bool_param(self, param_value, default_value=False):
@@ -271,7 +312,7 @@ class CeleryDownloadQueueManager:
if isinstance(param_value, bool):
return param_value
if isinstance(param_value, str):
return param_value.lower() in ['true', '1', 'yes', 'y', 'on']
return param_value.lower() in ["true", "1", "yes", "y", "on"]
return bool(param_value)

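
_parse_bool_param exists because request parameters arrive as real booleans, as strings from query parameters, or not at all, and only a small set of strings should count as true. Its expected behaviour, as a sketch (the None branch is inferred from the elided lines and the default_value parameter):

def parse_bool(value, default=False):
    """Mirror of _parse_bool_param's truthiness rules."""
    if value is None:
        return default
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        return value.lower() in ["true", "1", "yes", "y", "on"]
    return bool(value)

assert parse_bool(None, default=True) is True  # missing -> config default
assert parse_bool("Yes") is True               # case-insensitive string match
assert parse_bool("0") is False                # unrecognised strings are False
assert parse_bool(1) is True                   # other types fall back to bool()
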
def cancel_task(self, task_id):
@@ -321,7 +362,7 @@ class CeleryDownloadQueueManager:
return {
"status": "all_cancelled",
"cancelled_count": cancelled_count,
"total_tasks": len(tasks)
"total_tasks": len(tasks),
}

def get_queue_status(self):
@@ -346,32 +387,35 @@ class CeleryDownloadQueueManager:

if status == ProgressState.PROCESSING:
running_count += 1
running_tasks.append({
running_tasks.append(
{
"task_id": task.get("task_id"),
"name": task.get("name", "Unknown"),
"type": task.get("type", "unknown"),
"download_type": task.get("download_type", "unknown")
})
"download_type": task.get("download_type", "unknown"),
}
)
elif status == ProgressState.QUEUED:
pending_count += 1
elif status == ProgressState.ERROR:
failed_count += 1

# Get task info for retry information
task_info = get_task_info(task.get("task_id"))
last_status = get_last_task_status(task.get("task_id"))

retry_count = 0
if last_status:
retry_count = last_status.get("retry_count", 0)

failed_tasks.append({
failed_tasks.append(
{
"task_id": task.get("task_id"),
"name": task.get("name", "Unknown"),
"type": task.get("type", "unknown"),
"download_type": task.get("download_type", "unknown"),
"retry_count": retry_count
})
"retry_count": retry_count,
}
)

return {
"running": running_count,
@@ -380,7 +424,7 @@ class CeleryDownloadQueueManager:
"max_concurrent": self.max_concurrent,
"paused": self.paused,
"running_tasks": running_tasks,
"failed_tasks": failed_tasks
"failed_tasks": failed_tasks,
}

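For reference, get_queue_status() folds the per-task records into one snapshot. A plausible return value looks like the following (data illustrative; the count fields elided by the hunks above are guessed from the running_count/pending_count/failed_count accumulators):

{
    "running": 1,
    "pending": 4,   # assumed key for pending_count
    "failed": 2,    # assumed key for failed_count
    "max_concurrent": 3,
    "paused": False,
    "running_tasks": [
        {"task_id": "4f6c...", "name": "Some Song", "type": "track",
         "download_type": "track"},
    ],
    "failed_tasks": [
        {"task_id": "9a1b...", "name": "Some Album", "type": "album",
         "download_type": "album", "retry_count": 2},
    ],
}
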
def pause(self):
@@ -392,12 +436,15 @@ class CeleryDownloadQueueManager:
for task in tasks:
if task.get("status") == ProgressState.QUEUED:
# Update status to indicate the task is paused
store_task_status(task.get("task_id"), {
store_task_status(
task.get("task_id"),
{
"status": ProgressState.QUEUED,
"paused": True,
"message": "Queue is paused, task will run when queue is resumed",
"timestamp": time.time()
})
"timestamp": time.time(),
},
)

logger.info("Download queue processing paused")
return {"status": "paused"}
@@ -418,31 +465,25 @@ class CeleryDownloadQueueManager:
continue

# Update status to indicate the task is no longer paused
store_task_status(task_id, {
store_task_status(
task_id,
{
"status": ProgressState.QUEUED,
"paused": False,
"message": "Queue resumed, task will run soon",
"timestamp": time.time()
})
"timestamp": time.time(),
},
)

# Reschedule the task to run immediately
download_type = task_info.get("download_type", "unknown")

if download_type == "track":
download_track.apply_async(
kwargs=task_info,
task_id=task_id
)
download_track.apply_async(kwargs=task_info, task_id=task_id)
elif download_type == "album":
download_album.apply_async(
kwargs=task_info,
task_id=task_id
)
download_album.apply_async(kwargs=task_info, task_id=task_id)
elif download_type == "playlist":
download_playlist.apply_async(
kwargs=task_info,
task_id=task_id
)
download_playlist.apply_async(kwargs=task_info, task_id=task_id)

logger.info("Download queue processing resumed")
return {"status": "resumed"}
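
Pause/resume is cooperative rather than enforced: pausing only rewrites task status (new submissions are additionally delayed by the one-hour countdown seen earlier), and resuming re-issues apply_async with the original task_id so a task keeps its identity across the pause. The same idea with a stand-in scheduler, as a sketch:

import threading

def run_task(task_id: str) -> None:
    print(f"running {task_id}")

# Submission while paused: schedule far in the future (Celery: countdown=3600).
paused = True
timer = threading.Timer(3600 if paused else 0, run_task, args=("abc-123",))
timer.start()

# Resume: drop the delayed schedule and run now, keeping the same id
# (Celery: task.apply_async(kwargs=task_info, task_id="abc-123")).
timer.cancel()
run_task("abc-123")
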
@@ -462,5 +503,6 @@ class CeleryDownloadQueueManager:
logger.info("Celery Download Queue Manager stopped")
return {"status": "stopped"}


# Create the global instance
download_queue_manager = CeleryDownloadQueueManager()
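
Callers elsewhere in the app are expected to go through this singleton. A minimal submission might look like the following (URL and metadata are placeholders; "orig_request" carries per-request overrides, with config defaults filling the rest):

from routes.utils.celery_queue_manager import download_queue_manager

task_id = download_queue_manager.add_task({
    "download_type": "track",
    "type": "track",
    "name": "Some Song",
    "artist": "Some Artist",
    "url": "https://open.spotify.com/track/...",
    "orig_request": {"quality": "MP3_320"},
})
print(download_queue_manager.get_queue_status())
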
File diff suppressed because it is too large
@@ -2,7 +2,6 @@ import json
from pathlib import Path
import shutil
import sqlite3
import traceback # For logging detailed error messages
import time # For retry delays
import logging

@@ -14,17 +13,17 @@ import logging
logger = logging.getLogger(__name__) # Assuming logger is configured elsewhere

# --- New Database and Path Definitions ---
CREDS_BASE_DIR = Path('./data/creds')
ACCOUNTS_DB_PATH = CREDS_BASE_DIR / 'accounts.db'
BLOBS_DIR = CREDS_BASE_DIR / 'blobs'
GLOBAL_SEARCH_JSON_PATH = CREDS_BASE_DIR / 'search.json' # Global Spotify API creds
CREDS_BASE_DIR = Path("./data/creds")
ACCOUNTS_DB_PATH = CREDS_BASE_DIR / "accounts.db"
BLOBS_DIR = CREDS_BASE_DIR / "blobs"
GLOBAL_SEARCH_JSON_PATH = CREDS_BASE_DIR / "search.json" # Global Spotify API creds

EXPECTED_SPOTIFY_TABLE_COLUMNS = {
"name": "TEXT PRIMARY KEY",
# client_id and client_secret are now global
"region": "TEXT", # ISO 3166-1 alpha-2
"created_at": "REAL",
"updated_at": "REAL"
"updated_at": "REAL",
}

EXPECTED_DEEZER_TABLE_COLUMNS = {
@@ -32,9 +31,10 @@ EXPECTED_DEEZER_TABLE_COLUMNS = {
"arl": "TEXT",
"region": "TEXT", # ISO 3166-1 alpha-2
"created_at": "REAL",
"updated_at": "REAL"
"updated_at": "REAL",
}


def _get_db_connection():
ACCOUNTS_DB_PATH.parent.mkdir(parents=True, exist_ok=True)
BLOBS_DIR.mkdir(parents=True, exist_ok=True) # Ensure blobs directory also exists
@@ -42,7 +42,10 @@ def _get_db_connection():
conn.row_factory = sqlite3.Row
return conn

def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_columns: dict):

def _ensure_table_schema(
cursor: sqlite3.Cursor, table_name: str, expected_columns: dict
):
"""Ensures the given table has all expected columns, adding them if necessary."""
try:
cursor.execute(f"PRAGMA table_info({table_name})")
@@ -53,17 +56,21 @@ def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_colum
for col_name, col_type in expected_columns.items():
if col_name not in existing_column_names:
# Basic protection against altering PK after creation if table is not empty
if 'PRIMARY KEY' in col_type.upper() and existing_columns_info:
if "PRIMARY KEY" in col_type.upper() and existing_columns_info:
logger.warning(
f"Column '{col_name}' is part of PRIMARY KEY for table '{table_name}' "
f"and was expected to be created by CREATE TABLE. Skipping explicit ADD COLUMN."
)
continue

col_type_for_add = col_type.replace(' PRIMARY KEY', '').strip()
col_type_for_add = col_type.replace(" PRIMARY KEY", "").strip()
try:
cursor.execute(f"ALTER TABLE {table_name} ADD COLUMN {col_name} {col_type_for_add}")
logger.info(f"Added missing column '{col_name} {col_type_for_add}' to table '{table_name}'.")
cursor.execute(
f"ALTER TABLE {table_name} ADD COLUMN {col_name} {col_type_for_add}"
)
logger.info(
f"Added missing column '{col_name} {col_type_for_add}' to table '{table_name}'."
)
added_columns = True
except sqlite3.OperationalError as alter_e:
logger.warning(
@@ -72,9 +79,12 @@ def _ensure_table_schema(cursor: sqlite3.Cursor, table_name: str, expected_colum
)
return added_columns
except sqlite3.Error as e:
logger.error(f"Error ensuring schema for table '{table_name}': {e}", exc_info=True)
logger.error(
f"Error ensuring schema for table '{table_name}': {e}", exc_info=True
)
return False


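_ensure_table_schema is a minimal forward-only migration: read the live column list with PRAGMA table_info, then ALTER TABLE ... ADD COLUMN for anything missing, skipping primary keys since SQLite cannot retrofit a PK onto an existing table. A self-contained sketch of the technique:

import sqlite3

def ensure_columns(conn: sqlite3.Connection, table: str, expected: dict) -> None:
    """Add any columns in `expected` that the table does not yet have."""
    rows = conn.execute(f"PRAGMA table_info({table})").fetchall()
    existing = {row[1] for row in rows}  # row[1] is the column name
    for name, col_type in expected.items():
        if name not in existing and "PRIMARY KEY" not in col_type.upper():
            conn.execute(f"ALTER TABLE {table} ADD COLUMN {name} {col_type}")

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE accounts (name TEXT PRIMARY KEY)")
ensure_columns(conn, "accounts", {"name": "TEXT PRIMARY KEY", "region": "TEXT"})
print([row[1] for row in conn.execute("PRAGMA table_info(accounts)")])  # ['name', 'region']
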
def init_credentials_db():
"""Initializes the accounts.db and its tables if they don't exist."""
try:
@@ -105,46 +115,72 @@ def init_credentials_db():

# Ensure global search.json exists, create if not
if not GLOBAL_SEARCH_JSON_PATH.exists():
logger.info(f"Global Spotify search credential file not found at {GLOBAL_SEARCH_JSON_PATH}. Creating empty file.")
with open(GLOBAL_SEARCH_JSON_PATH, 'w') as f_search:
json.dump({"client_id": "", "client_secret": ""}, f_search, indent=4)
logger.info(
f"Global Spotify search credential file not found at {GLOBAL_SEARCH_JSON_PATH}. Creating empty file."
)
with open(GLOBAL_SEARCH_JSON_PATH, "w") as f_search:
json.dump(
{"client_id": "", "client_secret": ""}, f_search, indent=4
)

conn.commit()
logger.info(f"Credentials database initialized/schema checked at {ACCOUNTS_DB_PATH}")
logger.info(
f"Credentials database initialized/schema checked at {ACCOUNTS_DB_PATH}"
)
except sqlite3.Error as e:
logger.error(f"Error initializing credentials database: {e}", exc_info=True)
raise


def _get_global_spotify_api_creds():
"""Loads client_id and client_secret from the global search.json."""
if GLOBAL_SEARCH_JSON_PATH.exists():
try:
with open(GLOBAL_SEARCH_JSON_PATH, 'r') as f:
with open(GLOBAL_SEARCH_JSON_PATH, "r") as f:
search_data = json.load(f)
client_id = search_data.get('client_id')
client_secret = search_data.get('client_secret')
client_id = search_data.get("client_id")
client_secret = search_data.get("client_secret")
if client_id and client_secret:
return client_id, client_secret
else:
logger.warning(f"Global Spotify API credentials in {GLOBAL_SEARCH_JSON_PATH} are incomplete.")
logger.warning(
f"Global Spotify API credentials in {GLOBAL_SEARCH_JSON_PATH} are incomplete."
)
except Exception as e:
logger.error(f"Error reading global Spotify API credentials from {GLOBAL_SEARCH_JSON_PATH}: {e}", exc_info=True)
logger.error(
f"Error reading global Spotify API credentials from {GLOBAL_SEARCH_JSON_PATH}: {e}",
exc_info=True,
)
else:
logger.warning(f"Global Spotify API credential file {GLOBAL_SEARCH_JSON_PATH} not found.")
return None, None # Return None if file doesn't exist or creds are incomplete/invalid
logger.warning(
f"Global Spotify API credential file {GLOBAL_SEARCH_JSON_PATH} not found."
)
return (
None,
None,
) # Return None if file doesn't exist or creds are incomplete/invalid


def save_global_spotify_api_creds(client_id: str, client_secret: str):
"""Saves client_id and client_secret to the global search.json."""
try:
GLOBAL_SEARCH_JSON_PATH.parent.mkdir(parents=True, exist_ok=True)
with open(GLOBAL_SEARCH_JSON_PATH, 'w') as f:
json.dump({"client_id": client_id, "client_secret": client_secret}, f, indent=4)
logger.info(f"Global Spotify API credentials saved to {GLOBAL_SEARCH_JSON_PATH}")
with open(GLOBAL_SEARCH_JSON_PATH, "w") as f:
json.dump(
{"client_id": client_id, "client_secret": client_secret}, f, indent=4
)
logger.info(
f"Global Spotify API credentials saved to {GLOBAL_SEARCH_JSON_PATH}"
)
return True
except Exception as e:
logger.error(f"Error saving global Spotify API credentials to {GLOBAL_SEARCH_JSON_PATH}: {e}", exc_info=True)
logger.error(
f"Error saving global Spotify API credentials to {GLOBAL_SEARCH_JSON_PATH}: {e}",
exc_info=True,
)
return False


def _validate_with_retry(service_name, account_name, validation_data):
"""
Attempts to validate credentials with retries for connection errors.
@@ -161,51 +197,76 @@ def _validate_with_retry(service_name, account_name, validation_data):

for attempt in range(max_retries):
try:
if service_name == 'spotify':
if service_name == "spotify":
# For Spotify, validation uses the account's blob and GLOBAL API creds
global_client_id, global_client_secret = _get_global_spotify_api_creds()
if not global_client_id or not global_client_secret:
raise ValueError("Global Spotify API client_id or client_secret not configured for validation.")
raise ValueError(
"Global Spotify API client_id or client_secret not configured for validation."
)

blob_file_path = validation_data.get('blob_file_path')
blob_file_path = validation_data.get("blob_file_path")
if not blob_file_path or not Path(blob_file_path).exists():
raise ValueError(f"Spotify blob file missing for validation of account {account_name}")
SpoLogin(credentials_path=str(blob_file_path), spotify_client_id=global_client_id, spotify_client_secret=global_client_secret)
raise ValueError(
f"Spotify blob file missing for validation of account {account_name}"
)
SpoLogin(
credentials_path=str(blob_file_path),
spotify_client_id=global_client_id,
spotify_client_secret=global_client_secret,
)
else: # Deezer
arl = validation_data.get('arl')
arl = validation_data.get("arl")
if not arl:
raise ValueError("Missing 'arl' for Deezer validation.")
DeeLogin(arl=arl)

logger.info(f"{service_name.capitalize()} credentials for {account_name} validated successfully (attempt {attempt + 1}).")
logger.info(
f"{service_name.capitalize()} credentials for {account_name} validated successfully (attempt {attempt + 1})."
)
return True
except Exception as e:
last_exception = e
error_str = str(e).lower()
is_connection_error = (
"connection refused" in error_str or "connection error" in error_str or
"timeout" in error_str or "temporary failure in name resolution" in error_str or
"dns lookup failed" in error_str or "network is unreachable" in error_str or
"ssl handshake failed" in error_str or "connection reset by peer" in error_str
"connection refused" in error_str
or "connection error" in error_str
or "timeout" in error_str
or "temporary failure in name resolution" in error_str
or "dns lookup failed" in error_str
or "network is unreachable" in error_str
or "ssl handshake failed" in error_str
or "connection reset by peer" in error_str
)

if is_connection_error and attempt < max_retries - 1:
retry_delay = 2 + attempt
logger.warning(f"Validation for {account_name} ({service_name}) failed (attempt {attempt + 1}) due to connection issue: {e}. Retrying in {retry_delay}s...")
logger.warning(
f"Validation for {account_name} ({service_name}) failed (attempt {attempt + 1}) due to connection issue: {e}. Retrying in {retry_delay}s..."
)
time.sleep(retry_delay)
continue
else:
logger.error(f"Validation for {account_name} ({service_name}) failed on attempt {attempt + 1} (non-retryable or max retries).")
logger.error(
f"Validation for {account_name} ({service_name}) failed on attempt {attempt + 1} (non-retryable or max retries)."
)
break

if last_exception:
base_error_message = str(last_exception).splitlines()[-1]
detailed_error_message = f"Invalid {service_name} credentials for {account_name}. Verification failed: {base_error_message}"
if service_name == 'spotify' and "incorrect padding" in base_error_message.lower():
detailed_error_message += ". Hint: For Spotify, ensure the credentials blob content is correct."
if (
service_name == "spotify"
and "incorrect padding" in base_error_message.lower()
):
detailed_error_message += (
". Hint: For Spotify, ensure the credentials blob content is correct."
)
raise ValueError(detailed_error_message)
else:
raise ValueError(f"Invalid {service_name} credentials for {account_name}. Verification failed (unknown reason after retries).")
raise ValueError(
f"Invalid {service_name} credentials for {account_name}. Verification failed (unknown reason after retries)."
)


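The retry loop in _validate_with_retry only retries what looks transient: it lowercases the exception text, greps for connection-style phrases, and backs off a little longer each attempt (2s, 3s, ...), while credential errors surface immediately. A condensed sketch of that classify-and-retry shape:

import time

TRANSIENT_MARKERS = ("connection refused", "timeout", "network is unreachable")

def validate_with_retry(validate, max_retries: int = 3):
    """Call validate(); retry only on connection-flavoured errors."""
    last = None
    for attempt in range(max_retries):
        try:
            return validate()
        except Exception as e:
            last = e
            transient = any(m in str(e).lower() for m in TRANSIENT_MARKERS)
            if transient and attempt < max_retries - 1:
                time.sleep(2 + attempt)  # linear backoff: 2s, 3s, ...
                continue
            break
    raise ValueError(f"Verification failed: {last}")

validate_with_retry(lambda: True)  # succeeds on the first attempt
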
def create_credential(service, name, data):
@@ -219,7 +280,7 @@ def create_credential(service, name, data):
Raises:
ValueError, FileExistsError
"""
if service not in ['spotify', 'deezer']:
if service not in ["spotify", "deezer"]:
raise ValueError("Service must be 'spotify' or 'deezer'")
if not name or not isinstance(name, str):
raise ValueError("Credential name must be a non-empty string.")
@@ -230,43 +291,56 @@ def create_credential(service, name, data):
cursor = conn.cursor()
conn.row_factory = sqlite3.Row
try:
if service == 'spotify':
required_fields = {'region', 'blob_content'} # client_id/secret are global
if service == "spotify":
required_fields = {
"region",
"blob_content",
} # client_id/secret are global
if not required_fields.issubset(data.keys()):
raise ValueError(f"Missing fields for Spotify. Required: {required_fields}")
raise ValueError(
f"Missing fields for Spotify. Required: {required_fields}"
)

blob_path = BLOBS_DIR / name / 'credentials.json'
validation_data = {'blob_file_path': str(blob_path)} # Validation uses global API creds
blob_path = BLOBS_DIR / name / "credentials.json"
validation_data = {
"blob_file_path": str(blob_path)
} # Validation uses global API creds

blob_path.parent.mkdir(parents=True, exist_ok=True)
with open(blob_path, 'w') as f_blob:
if isinstance(data['blob_content'], dict):
json.dump(data['blob_content'], f_blob, indent=4)
with open(blob_path, "w") as f_blob:
if isinstance(data["blob_content"], dict):
json.dump(data["blob_content"], f_blob, indent=4)
else: # assume string
f_blob.write(data['blob_content'])
f_blob.write(data["blob_content"])

try:
_validate_with_retry('spotify', name, validation_data)
_validate_with_retry("spotify", name, validation_data)
cursor.execute(
"INSERT INTO spotify (name, region, created_at, updated_at) VALUES (?, ?, ?, ?)",
(name, data['region'], current_time, current_time)
(name, data["region"], current_time, current_time),
)
except Exception as e:
if blob_path.exists(): blob_path.unlink() # Cleanup blob
if blob_path.parent.exists() and not any(blob_path.parent.iterdir()): blob_path.parent.rmdir()
except Exception:
if blob_path.exists():
blob_path.unlink() # Cleanup blob
if blob_path.parent.exists() and not any(
blob_path.parent.iterdir()
):
blob_path.parent.rmdir()
raise # Re-raise validation or DB error

elif service == 'deezer':
required_fields = {'arl', 'region'}
elif service == "deezer":
required_fields = {"arl", "region"}
if not required_fields.issubset(data.keys()):
raise ValueError(f"Missing fields for Deezer. Required: {required_fields}")
raise ValueError(
f"Missing fields for Deezer. Required: {required_fields}"
)

validation_data = {'arl': data['arl']}
_validate_with_retry('deezer', name, validation_data)
validation_data = {"arl": data["arl"]}
_validate_with_retry("deezer", name, validation_data)

cursor.execute(
"INSERT INTO deezer (name, arl, region, created_at, updated_at) VALUES (?, ?, ?, ?, ?)",
(name, data['arl'], data['region'], current_time, current_time)
(name, data["arl"], data["region"], current_time, current_time),
)
conn.commit()
logger.info(f"Credential '{name}' for {service} created successfully.")
@@ -274,7 +348,9 @@ def create_credential(service, name, data):
except sqlite3.IntegrityError:
raise FileExistsError(f"Credential '{name}' already exists for {service}.")
except Exception as e:
logger.error(f"Error creating credential {name} for {service}: {e}", exc_info=True)
logger.error(
f"Error creating credential {name} for {service}: {e}", exc_info=True
)
raise ValueError(f"Could not create credential: {e}")


@@ -285,7 +361,7 @@ def get_credential(service, name):
For Deezer, returns dict with name, arl, and region.
Raises FileNotFoundError if the credential does not exist.
"""
if service not in ['spotify', 'deezer']:
if service not in ["spotify", "deezer"]:
raise ValueError("Service must be 'spotify' or 'deezer'")

with _get_db_connection() as conn:
@@ -299,53 +375,62 @@ def get_credential(service, name):

data = dict(row)

if service == 'spotify':
blob_file_path = BLOBS_DIR / name / 'credentials.json'
data['blob_file_path'] = str(blob_file_path) # Keep for internal use
if service == "spotify":
blob_file_path = BLOBS_DIR / name / "credentials.json"
data["blob_file_path"] = str(blob_file_path) # Keep for internal use
try:
with open(blob_file_path, 'r') as f_blob:
with open(blob_file_path, "r") as f_blob:
blob_data = json.load(f_blob)
data['blob_content'] = blob_data
data["blob_content"] = blob_data
except FileNotFoundError:
logger.warning(f"Spotify blob file not found for {name} at {blob_file_path} during get_credential.")
data['blob_content'] = None
logger.warning(
f"Spotify blob file not found for {name} at {blob_file_path} during get_credential."
)
data["blob_content"] = None
except json.JSONDecodeError:
logger.warning(f"Error decoding JSON from Spotify blob file for {name} at {blob_file_path}.")
data['blob_content'] = None
logger.warning(
f"Error decoding JSON from Spotify blob file for {name} at {blob_file_path}."
)
data["blob_content"] = None
except Exception as e:
logger.error(f"Unexpected error reading Spotify blob for {name}: {e}", exc_info=True)
data['blob_content'] = None
logger.error(
f"Unexpected error reading Spotify blob for {name}: {e}",
exc_info=True,
)
data["blob_content"] = None

cleaned_data = {
'name': data.get('name'),
'region': data.get('region'),
'blob_content': data.get('blob_content')
"name": data.get("name"),
"region": data.get("region"),
"blob_content": data.get("blob_content"),
}
return cleaned_data

elif service == 'deezer':
elif service == "deezer":
cleaned_data = {
'name': data.get('name'),
'region': data.get('region'),
'arl': data.get('arl')
"name": data.get("name"),
"region": data.get("region"),
"arl": data.get("arl"),
}
return cleaned_data

# Fallback, should not be reached if service is spotify or deezer
return None


def list_credentials(service):
if service not in ['spotify', 'deezer']:
if service not in ["spotify", "deezer"]:
raise ValueError("Service must be 'spotify' or 'deezer'")

with _get_db_connection() as conn:
cursor = conn.cursor()
conn.row_factory = sqlite3.Row
cursor.execute(f"SELECT name FROM {service}")
return [row['name'] for row in cursor.fetchall()]
return [row["name"] for row in cursor.fetchall()]


def delete_credential(service, name):
if service not in ['spotify', 'deezer']:
if service not in ["spotify", "deezer"]:
raise ValueError("Service must be 'spotify' or 'deezer'")

with _get_db_connection() as conn:
@@ -355,7 +440,7 @@ def delete_credential(service, name):
if cursor.rowcount == 0:
raise FileNotFoundError(f"Credential '{name}' not found for {service}.")

if service == 'spotify':
if service == "spotify":
blob_dir = BLOBS_DIR / name
if blob_dir.exists():
shutil.rmtree(blob_dir)
@@ -363,6 +448,7 @@ def delete_credential(service, name):
logger.info(f"Credential '{name}' for {service} deleted.")
return {"status": "deleted", "service": service, "name": name}


def edit_credential(service, name, new_data):
"""
Edits an existing credential.
@@ -370,14 +456,16 @@ def edit_credential(service, name, new_data):
new_data for Deezer can include: arl, region.
Fields not in new_data remain unchanged.
"""
if service not in ['spotify', 'deezer']:
if service not in ["spotify", "deezer"]:
raise ValueError("Service must be 'spotify' or 'deezer'")

current_time = time.time()

# Fetch existing data first to preserve unchanged fields and for validation backup
try:
existing_cred = get_credential(service, name) # This will raise FileNotFoundError if not found
existing_cred = get_credential(
service, name
) # This will raise FileNotFoundError if not found
except FileNotFoundError:
raise
except Exception as e: # Catch other errors from get_credential
@@ -389,75 +477,100 @@ def edit_credential(service, name, new_data):
cursor = conn.cursor()
conn.row_factory = sqlite3.Row

if service == 'spotify':
if service == "spotify":
# Prepare data for DB update
db_update_data = {
'region': updated_fields.get('region', existing_cred['region']),
'updated_at': current_time,
'name': name # for WHERE clause
"region": updated_fields.get("region", existing_cred["region"]),
"updated_at": current_time,
"name": name, # for WHERE clause
}

blob_path = Path(existing_cred['blob_file_path']) # Use path from existing
blob_path = Path(existing_cred["blob_file_path"]) # Use path from existing
original_blob_content = None
if blob_path.exists():
with open(blob_path, 'r') as f_orig_blob:
with open(blob_path, "r") as f_orig_blob:
original_blob_content = f_orig_blob.read()

# If blob_content is being updated, write it temporarily for validation
if 'blob_content' in updated_fields:
if "blob_content" in updated_fields:
blob_path.parent.mkdir(parents=True, exist_ok=True)
with open(blob_path, 'w') as f_new_blob:
if isinstance(updated_fields['blob_content'], dict):
json.dump(updated_fields['blob_content'], f_new_blob, indent=4)
with open(blob_path, "w") as f_new_blob:
if isinstance(updated_fields["blob_content"], dict):
json.dump(updated_fields["blob_content"], f_new_blob, indent=4)
else:
f_new_blob.write(updated_fields['blob_content'])
f_new_blob.write(updated_fields["blob_content"])

validation_data = {'blob_file_path': str(blob_path)}
validation_data = {"blob_file_path": str(blob_path)}

try:
_validate_with_retry('spotify', name, validation_data)
_validate_with_retry("spotify", name, validation_data)

set_clause = ", ".join([f"{key} = ?" for key in db_update_data if key != 'name'])
values = [db_update_data[key] for key in db_update_data if key != 'name'] + [name]
cursor.execute(f"UPDATE spotify SET {set_clause} WHERE name = ?", tuple(values))
set_clause = ", ".join(
[f"{key} = ?" for key in db_update_data if key != "name"]
)
values = [
db_update_data[key] for key in db_update_data if key != "name"
] + [name]
cursor.execute(
f"UPDATE spotify SET {set_clause} WHERE name = ?", tuple(values)
)

# If validation passed and blob was in new_data, it's already written.
# If blob_content was NOT in new_data, the existing blob (if any) remains.
except Exception as e:
except Exception:
# Revert blob if it was changed and validation failed
if 'blob_content' in updated_fields and original_blob_content is not None:
with open(blob_path, 'w') as f_revert_blob:
if (
"blob_content" in updated_fields
and original_blob_content is not None
):
with open(blob_path, "w") as f_revert_blob:
f_revert_blob.write(original_blob_content)
elif 'blob_content' in updated_fields and original_blob_content is None and blob_path.exists():
elif (
"blob_content" in updated_fields
and original_blob_content is None
and blob_path.exists()
):
# If new blob was written but there was no original to revert to, delete the new one.
blob_path.unlink()
raise # Re-raise validation or DB error

elif service == 'deezer':
elif service == "deezer":
db_update_data = {
'arl': updated_fields.get('arl', existing_cred['arl']),
'region': updated_fields.get('region', existing_cred['region']),
'updated_at': current_time,
'name': name # for WHERE clause
"arl": updated_fields.get("arl", existing_cred["arl"]),
"region": updated_fields.get("region", existing_cred["region"]),
"updated_at": current_time,
"name": name, # for WHERE clause
}

validation_data = {'arl': db_update_data['arl']}
_validate_with_retry('deezer', name, validation_data) # Validation happens before DB write for Deezer
validation_data = {"arl": db_update_data["arl"]}
_validate_with_retry(
"deezer", name, validation_data
) # Validation happens before DB write for Deezer

set_clause = ", ".join([f"{key} = ?" for key in db_update_data if key != 'name'])
values = [db_update_data[key] for key in db_update_data if key != 'name'] + [name]
cursor.execute(f"UPDATE deezer SET {set_clause} WHERE name = ?", tuple(values))
set_clause = ", ".join(
[f"{key} = ?" for key in db_update_data if key != "name"]
)
values = [
db_update_data[key] for key in db_update_data if key != "name"
] + [name]
cursor.execute(
f"UPDATE deezer SET {set_clause} WHERE name = ?", tuple(values)
)

if cursor.rowcount == 0: # Should not happen if get_credential succeeded
raise FileNotFoundError(f"Credential '{name}' for {service} disappeared during edit.")
raise FileNotFoundError(
f"Credential '{name}' for {service} disappeared during edit."
)

conn.commit()
logger.info(f"Credential '{name}' for {service} updated successfully.")
return {"status": "updated", "service": service, "name": name}


# --- Helper for credential file path (mainly for Spotify blob) ---
def get_spotify_blob_path(account_name: str) -> Path:
return BLOBS_DIR / account_name / 'credentials.json'
return BLOBS_DIR / account_name / "credentials.json"


# It's good practice to call init_credentials_db() when the app starts.
# This can be done in the main application setup. For now, defining it here.

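
Taken together, the module behaves like a small CRUD API over accounts.db. Typical usage, sketched with placeholder values (create_credential validates against the live service, so a made-up ARL would raise ValueError):

from routes.utils.credentials import (
    create_credential, get_credential, list_credentials, delete_credential,
)

create_credential("deezer", "my-account", {"arl": "<arl-cookie>", "region": "US"})
print(list_credentials("deezer"))              # ['my-account']
print(get_credential("deezer", "my-account"))  # {'name': ..., 'region': ..., 'arl': ...}
delete_credential("deezer", "my-account")
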
@@ -1,8 +1,4 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
from deezspot.easy_spoty import Spo
|
||||
import json
|
||||
from pathlib import Path
|
||||
from routes.utils.celery_queue_manager import get_config_params
|
||||
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds
|
||||
|
||||
@@ -13,7 +9,6 @@ import logging
|
||||
# Initialize logger
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# We'll rely on get_config_params() instead of directly loading the config file
|
||||
|
||||
def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None):
|
||||
"""
|
||||
@@ -32,18 +27,24 @@ def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None):
|
||||
client_id, client_secret = _get_global_spotify_api_creds()
|
||||
|
||||
if not client_id or not client_secret:
|
||||
raise ValueError("Global Spotify API client_id or client_secret not configured in ./data/creds/search.json.")
|
||||
raise ValueError(
|
||||
"Global Spotify API client_id or client_secret not configured in ./data/creds/search.json."
|
||||
)
|
||||
|
||||
# Get config parameters including default Spotify account name
|
||||
# This might still be useful if Spo uses the account name for other things (e.g. market/region if not passed explicitly)
|
||||
# For now, we are just ensuring the API keys are set.
|
||||
config_params = get_config_params()
|
||||
main_spotify_account_name = config_params.get('spotify', '') # Still good to know which account is 'default' contextually
|
||||
main_spotify_account_name = config_params.get(
|
||||
"spotify", ""
|
||||
) # Still good to know which account is 'default' contextually
|
||||
|
||||
if not main_spotify_account_name:
|
||||
# This is less critical now that API keys are global, but could indicate a misconfiguration
|
||||
# if other parts of Spo expect an account context.
|
||||
print(f"WARN: No default Spotify account name configured in settings (main.json). API calls will use global keys.")
|
||||
print(
|
||||
"WARN: No default Spotify account name configured in settings (main.json). API calls will use global keys."
|
||||
)
|
||||
else:
|
||||
# Optionally, one could load the specific account's region here if Spo.init or methods need it,
|
||||
# but easy_spoty's Spo doesn't seem to take region directly in __init__.
|
||||
@@ -51,12 +52,16 @@ def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None):
|
||||
try:
|
||||
# We call get_credential just to check if the account exists,
|
||||
# not for client_id/secret anymore for Spo.__init__
|
||||
get_credential('spotify', main_spotify_account_name)
|
||||
get_credential("spotify", main_spotify_account_name)
|
||||
except FileNotFoundError:
|
||||
# This is a more serious warning if an account is expected to exist.
|
||||
print(f"WARN: Default Spotify account '{main_spotify_account_name}' configured in main.json was not found in credentials database.")
|
||||
print(
|
||||
f"WARN: Default Spotify account '{main_spotify_account_name}' configured in main.json was not found in credentials database."
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"WARN: Error accessing default Spotify account '{main_spotify_account_name}': {e}")
|
||||
print(
|
||||
f"WARN: Error accessing default Spotify account '{main_spotify_account_name}': {e}"
|
||||
)
|
||||
|
||||
# Initialize the Spotify client with GLOBAL credentials
|
||||
Spo.__init__(client_id, client_secret)
|
||||
@@ -83,6 +88,7 @@ def get_spotify_info(spotify_id, spotify_type, limit=None, offset=None):
|
||||
else:
|
||||
raise ValueError(f"Unsupported Spotify type: {spotify_type}")
|
||||
|
||||
|
||||
def get_deezer_info(deezer_id, deezer_type, limit=None):
|
||||
"""
|
||||
Get info from Deezer API.
|
||||
@@ -103,7 +109,9 @@ def get_deezer_info(deezer_id, deezer_type, limit=None):
|
||||
ValueError: If deezer_type is unsupported.
|
||||
Various exceptions from DeezerAPI (NoDataApi, QuotaExceeded, requests.exceptions.RequestException, etc.)
|
||||
"""
|
||||
logger.debug(f"Fetching Deezer info for ID {deezer_id}, type {deezer_type}, limit {limit}")
|
||||
logger.debug(
|
||||
f"Fetching Deezer info for ID {deezer_id}, type {deezer_type}, limit {limit}"
|
||||
)
|
||||
|
||||
# DeezerAPI uses class methods; its @classmethod __init__ handles setup.
|
||||
# No specific ARL or account handling here as DeezerAPI seems to use general endpoints.
|
||||
|
||||
@@ -6,29 +6,30 @@ from pathlib import Path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
HISTORY_DIR = Path('./data/history')
|
||||
HISTORY_DB_FILE = HISTORY_DIR / 'download_history.db'
|
||||
HISTORY_DIR = Path("./data/history")
|
||||
HISTORY_DB_FILE = HISTORY_DIR / "download_history.db"
|
||||
|
||||
EXPECTED_COLUMNS = {
|
||||
'task_id': 'TEXT PRIMARY KEY',
|
||||
'download_type': 'TEXT',
|
||||
'item_name': 'TEXT',
|
||||
'item_artist': 'TEXT',
|
||||
'item_album': 'TEXT',
|
||||
'item_url': 'TEXT',
|
||||
'spotify_id': 'TEXT',
|
||||
'status_final': 'TEXT', # 'COMPLETED', 'ERROR', 'CANCELLED'
|
||||
'error_message': 'TEXT',
|
||||
'timestamp_added': 'REAL',
|
||||
'timestamp_completed': 'REAL',
|
||||
'original_request_json': 'TEXT',
|
||||
'last_status_obj_json': 'TEXT',
|
||||
'service_used': 'TEXT',
|
||||
'quality_profile': 'TEXT',
|
||||
'convert_to': 'TEXT',
|
||||
'bitrate': 'TEXT'
|
||||
"task_id": "TEXT PRIMARY KEY",
|
||||
"download_type": "TEXT",
|
||||
"item_name": "TEXT",
|
||||
"item_artist": "TEXT",
|
||||
"item_album": "TEXT",
|
||||
"item_url": "TEXT",
|
||||
"spotify_id": "TEXT",
|
||||
"status_final": "TEXT", # 'COMPLETED', 'ERROR', 'CANCELLED'
|
||||
"error_message": "TEXT",
|
||||
"timestamp_added": "REAL",
|
||||
"timestamp_completed": "REAL",
|
||||
"original_request_json": "TEXT",
|
||||
"last_status_obj_json": "TEXT",
|
||||
"service_used": "TEXT",
|
||||
"quality_profile": "TEXT",
|
||||
"convert_to": "TEXT",
|
||||
"bitrate": "TEXT",
|
||||
}
|
||||
|
||||
|
||||
def init_history_db():
|
||||
"""Initializes the download history database, creates the table if it doesn't exist,
|
||||
and adds any missing columns to an existing table."""
|
||||
@@ -42,7 +43,7 @@ def init_history_db():
|
||||
# The primary key constraint is handled by the initial CREATE TABLE.
|
||||
# If 'task_id' is missing, it cannot be added as PRIMARY KEY to an existing table
|
||||
# without complex migrations. We assume 'task_id' will exist if the table exists.
|
||||
create_table_sql = f"""
|
||||
create_table_sql = """
|
||||
CREATE TABLE IF NOT EXISTS download_history (
|
||||
task_id TEXT PRIMARY KEY,
|
||||
download_type TEXT,
|
||||
@@ -74,42 +75,54 @@ def init_history_db():
|
||||
added_columns = False
|
||||
for col_name, col_type in EXPECTED_COLUMNS.items():
|
||||
if col_name not in existing_column_names:
|
||||
if 'PRIMARY KEY' in col_type.upper() and col_name == 'task_id':
|
||||
if "PRIMARY KEY" in col_type.upper() and col_name == "task_id":
|
||||
# This case should be handled by CREATE TABLE, but as a safeguard:
|
||||
# If task_id is somehow missing and table exists, this is a problem.
|
||||
# Adding it as PK here is complex and might fail if data exists.
|
||||
# For now, we assume CREATE TABLE handles the PK.
|
||||
# If we were to add it, it would be 'ALTER TABLE download_history ADD COLUMN task_id TEXT;'
|
||||
# and then potentially a separate step to make it PK if table is empty, which is non-trivial.
|
||||
logger.warning(f"Column '{col_name}' is part of PRIMARY KEY and was expected to be created by CREATE TABLE. Skipping explicit ADD COLUMN.")
|
||||
logger.warning(
|
||||
f"Column '{col_name}' is part of PRIMARY KEY and was expected to be created by CREATE TABLE. Skipping explicit ADD COLUMN."
|
||||
)
|
||||
continue
|
||||
|
||||
# For other columns, just add them.
|
||||
# Remove PRIMARY KEY from type definition if present, as it's only for table creation.
|
||||
col_type_for_add = col_type.replace(' PRIMARY KEY', '').strip()
|
||||
col_type_for_add = col_type.replace(" PRIMARY KEY", "").strip()
|
||||
try:
|
||||
cursor.execute(f"ALTER TABLE download_history ADD COLUMN {col_name} {col_type_for_add}")
|
||||
logger.info(f"Added missing column '{col_name} {col_type_for_add}' to download_history table.")
|
||||
cursor.execute(
|
||||
f"ALTER TABLE download_history ADD COLUMN {col_name} {col_type_for_add}"
|
||||
)
|
||||
logger.info(
|
||||
f"Added missing column '{col_name} {col_type_for_add}' to download_history table."
|
||||
)
|
||||
added_columns = True
|
||||
except sqlite3.OperationalError as alter_e:
|
||||
# This might happen if a column (e.g. task_id) without "PRIMARY KEY" is added by this loop
|
||||
# but the initial create table already made it a primary key.
|
||||
# Or other more complex scenarios.
|
||||
logger.warning(f"Could not add column '{col_name}': {alter_e}. It might already exist or there's a schema mismatch.")
|
||||
|
||||
logger.warning(
|
||||
f"Could not add column '{col_name}': {alter_e}. It might already exist or there's a schema mismatch."
|
||||
)
|
||||
|
||||
if added_columns:
|
||||
conn.commit()
|
||||
logger.info(f"Download history table schema updated at {HISTORY_DB_FILE}")
|
||||
else:
|
||||
logger.info(f"Download history database schema is up-to-date at {HISTORY_DB_FILE}")
|
||||
logger.info(
|
||||
f"Download history database schema is up-to-date at {HISTORY_DB_FILE}"
|
||||
)
|
||||
|
||||
except sqlite3.Error as e:
|
||||
logger.error(f"Error initializing download history database: {e}", exc_info=True)
|
||||
logger.error(
|
||||
f"Error initializing download history database: {e}", exc_info=True
|
||||
)
|
||||
finally:
|
||||
if conn:
|
||||
conn.close()
|
||||
|
||||
|
||||
def add_entry_to_history(history_data: dict):
    """Adds or replaces an entry in the download_history table.

@@ -118,11 +131,23 @@ def add_entry_to_history(history_data: dict):
    Expected keys match the table columns.
    """
    required_keys = [
        'task_id', 'download_type', 'item_name', 'item_artist', 'item_album',
        'item_url', 'spotify_id', 'status_final', 'error_message',
        'timestamp_added', 'timestamp_completed', 'original_request_json',
        'last_status_obj_json', 'service_used', 'quality_profile',
        'convert_to', 'bitrate'
        "task_id",
        "download_type",
        "item_name",
        "item_artist",
        "item_album",
        "item_url",
        "spotify_id",
        "status_final",
        "error_message",
        "timestamp_added",
        "timestamp_completed",
        "original_request_json",
        "last_status_obj_json",
        "service_used",
        "quality_profile",
        "convert_to",
        "bitrate",
    ]
    # Ensure all keys are present, filling with None if not
    for key in required_keys:
@@ -132,7 +157,8 @@ def add_entry_to_history(history_data: dict):
    try:
        conn = sqlite3.connect(HISTORY_DB_FILE)
        cursor = conn.cursor()
        cursor.execute("""
        cursor.execute(
            """
            INSERT OR REPLACE INTO download_history (
                task_id, download_type, item_name, item_artist, item_album,
                item_url, spotify_id, status_final, error_message,
@@ -140,26 +166,49 @@ def add_entry_to_history(history_data: dict):
                last_status_obj_json, service_used, quality_profile,
                convert_to, bitrate
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """, (
            history_data['task_id'], history_data['download_type'], history_data['item_name'],
            history_data['item_artist'], history_data['item_album'], history_data['item_url'],
            history_data['spotify_id'], history_data['status_final'], history_data['error_message'],
            history_data['timestamp_added'], history_data['timestamp_completed'],
            history_data['original_request_json'], history_data['last_status_obj_json'],
            history_data['service_used'], history_data['quality_profile'],
            history_data['convert_to'], history_data['bitrate']
        ))
            """,
            (
                history_data["task_id"],
                history_data["download_type"],
                history_data["item_name"],
                history_data["item_artist"],
                history_data["item_album"],
                history_data["item_url"],
                history_data["spotify_id"],
                history_data["status_final"],
                history_data["error_message"],
                history_data["timestamp_added"],
                history_data["timestamp_completed"],
                history_data["original_request_json"],
                history_data["last_status_obj_json"],
                history_data["service_used"],
                history_data["quality_profile"],
                history_data["convert_to"],
                history_data["bitrate"],
            ),
        )
        conn.commit()
        logger.info(f"Added/Updated history for task_id: {history_data['task_id']}, status: {history_data['status_final']}")
        logger.info(
            f"Added/Updated history for task_id: {history_data['task_id']}, status: {history_data['status_final']}"
        )
    except sqlite3.Error as e:
        logger.error(f"Error adding entry to download history for task_id {history_data.get('task_id')}: {e}", exc_info=True)
        logger.error(
            f"Error adding entry to download history for task_id {history_data.get('task_id')}: {e}",
            exc_info=True,
        )
    except Exception as e:
        logger.error(f"Unexpected error adding to history for task_id {history_data.get('task_id')}: {e}", exc_info=True)
        logger.error(
            f"Unexpected error adding to history for task_id {history_data.get('task_id')}: {e}",
            exc_info=True,
        )
    finally:
        if conn:
            conn.close()

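INSERT OR REPLACE keys off the table's PRIMARY KEY (task_id here), so re-recording a task overwrites its previous row instead of duplicating it. A quick illustration of that behaviour with a toy two-column table:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE history (task_id TEXT PRIMARY KEY, status TEXT)")
conn.execute("INSERT OR REPLACE INTO history VALUES (?, ?)", ("t1", "QUEUED"))
conn.execute("INSERT OR REPLACE INTO history VALUES (?, ?)", ("t1", "COMPLETED"))
# The second statement replaced the first row; only one row remains.
print(conn.execute("SELECT * FROM history").fetchall())  # [('t1', 'COMPLETED')]
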
def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_order='DESC', filters=None):

def get_history_entries(
    limit=25, offset=0, sort_by="timestamp_completed", sort_order="DESC", filters=None
):
    """Retrieves entries from the download_history table with pagination, sorting, and filtering.

    Args:
@@ -189,7 +238,7 @@ def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_
    if filters:
        for column, value in filters.items():
            # Basic security: ensure column is a valid one (alphanumeric + underscore)
            if column.replace('_', '').isalnum():
            if column.replace("_", "").isalnum():
                where_clauses.append(f"{column} = ?")
                params.append(value)

@@ -204,16 +253,26 @@ def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_

    # Validate sort_by and sort_order to prevent SQL injection
    valid_sort_columns = [
        'task_id', 'download_type', 'item_name', 'item_artist', 'item_album',
        'item_url', 'status_final', 'timestamp_added', 'timestamp_completed',
        'service_used', 'quality_profile', 'convert_to', 'bitrate'
        "task_id",
        "download_type",
        "item_name",
        "item_artist",
        "item_album",
        "item_url",
        "status_final",
        "timestamp_added",
        "timestamp_completed",
        "service_used",
        "quality_profile",
        "convert_to",
        "bitrate",
    ]
    if sort_by not in valid_sort_columns:
        sort_by = 'timestamp_completed' # Default sort
        sort_by = "timestamp_completed"  # Default sort

    sort_order_upper = sort_order.upper()
    if sort_order_upper not in ['ASC', 'DESC']:
        sort_order_upper = 'DESC'
    if sort_order_upper not in ["ASC", "DESC"]:
        sort_order_upper = "DESC"

    select_query += f" ORDER BY {sort_by} {sort_order_upper} LIMIT ? OFFSET ?"
    params.extend([limit, offset])
@@ -232,72 +291,79 @@ def get_history_entries(limit=25, offset=0, sort_by='timestamp_completed', sort_
        if conn:
            conn.close()

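Column names cannot be bound as SQL parameters, which is why this function whitelists sort_by and interpolates it only after validation, while values still travel through ? placeholders. A condensed sketch of the same two-layer guard (names here are illustrative):

VALID_SORT = {"timestamp_completed", "item_name", "status_final"}

def build_query(sort_by: str, filters: dict) -> tuple[str, list]:
    sort_by = sort_by if sort_by in VALID_SORT else "timestamp_completed"
    clauses, params = [], []
    for column, value in filters.items():
        if column.replace("_", "").isalnum():  # identifier sanity check
            clauses.append(f"{column} = ?")    # the value stays parameterized
            params.append(value)
    where = f" WHERE {' AND '.join(clauses)}" if clauses else ""
    return f"SELECT * FROM download_history{where} ORDER BY {sort_by}", params
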
if __name__ == '__main__':

if __name__ == "__main__":
    # For testing purposes
    logging.basicConfig(level=logging.INFO)
    init_history_db()

    sample_data_complete = {
        'task_id': 'test_task_123',
        'download_type': 'track',
        'item_name': 'Test Song',
        'item_artist': 'Test Artist',
        'item_album': 'Test Album',
        'item_url': 'http://spotify.com/track/123',
        'spotify_id': '123',
        'status_final': 'COMPLETED',
        'error_message': None,
        'timestamp_added': time.time() - 3600,
        'timestamp_completed': time.time(),
        'original_request_json': json.dumps({'param1': 'value1'}),
        'last_status_obj_json': json.dumps({'status': 'complete', 'message': 'Finished!'}),
        'service_used': 'Spotify (Primary)',
        'quality_profile': 'NORMAL',
        'convert_to': None,
        'bitrate': None
        "task_id": "test_task_123",
        "download_type": "track",
        "item_name": "Test Song",
        "item_artist": "Test Artist",
        "item_album": "Test Album",
        "item_url": "http://spotify.com/track/123",
        "spotify_id": "123",
        "status_final": "COMPLETED",
        "error_message": None,
        "timestamp_added": time.time() - 3600,
        "timestamp_completed": time.time(),
        "original_request_json": json.dumps({"param1": "value1"}),
        "last_status_obj_json": json.dumps(
            {"status": "complete", "message": "Finished!"}
        ),
        "service_used": "Spotify (Primary)",
        "quality_profile": "NORMAL",
        "convert_to": None,
        "bitrate": None,
    }
    add_entry_to_history(sample_data_complete)

    sample_data_error = {
        'task_id': 'test_task_456',
        'download_type': 'album',
        'item_name': 'Another Album',
        'item_artist': 'Another Artist',
        'item_album': 'Another Album', # For albums, item_name and item_album are often the same
        'item_url': 'http://spotify.com/album/456',
        'spotify_id': '456',
        'status_final': 'ERROR',
        'error_message': 'Download failed due to network issue.',
        'timestamp_added': time.time() - 7200,
        'timestamp_completed': time.time() - 60,
        'original_request_json': json.dumps({'param2': 'value2'}),
        'last_status_obj_json': json.dumps({'status': 'error', 'error': 'Network issue'}),
        'service_used': 'Deezer',
        'quality_profile': 'MP3_320',
        'convert_to': 'mp3',
        'bitrate': '320'
        "task_id": "test_task_456",
        "download_type": "album",
        "item_name": "Another Album",
        "item_artist": "Another Artist",
        "item_album": "Another Album",  # For albums, item_name and item_album are often the same
        "item_url": "http://spotify.com/album/456",
        "spotify_id": "456",
        "status_final": "ERROR",
        "error_message": "Download failed due to network issue.",
        "timestamp_added": time.time() - 7200,
        "timestamp_completed": time.time() - 60,
        "original_request_json": json.dumps({"param2": "value2"}),
        "last_status_obj_json": json.dumps(
            {"status": "error", "error": "Network issue"}
        ),
        "service_used": "Deezer",
        "quality_profile": "MP3_320",
        "convert_to": "mp3",
        "bitrate": "320",
    }
    add_entry_to_history(sample_data_error)

    # Test updating an entry
    updated_data_complete = {
        'task_id': 'test_task_123',
        'download_type': 'track',
        'item_name': 'Test Song (Updated)',
        'item_artist': 'Test Artist',
        'item_album': 'Test Album II',
        'item_url': 'http://spotify.com/track/123',
        'spotify_id': '123',
        'status_final': 'COMPLETED',
        'error_message': None,
        'timestamp_added': time.time() - 3600,
        'timestamp_completed': time.time() + 100, # Updated completion time
        'original_request_json': json.dumps({'param1': 'value1', 'new_param': 'added'}),
        'last_status_obj_json': json.dumps({'status': 'complete', 'message': 'Finished! With update.'}),
        'service_used': 'Spotify (Deezer Fallback)',
        'quality_profile': 'HIGH',
        'convert_to': 'flac',
        'bitrate': None
        "task_id": "test_task_123",
        "download_type": "track",
        "item_name": "Test Song (Updated)",
        "item_artist": "Test Artist",
        "item_album": "Test Album II",
        "item_url": "http://spotify.com/track/123",
        "spotify_id": "123",
        "status_final": "COMPLETED",
        "error_message": None,
        "timestamp_added": time.time() - 3600,
        "timestamp_completed": time.time() + 100,  # Updated completion time
        "original_request_json": json.dumps({"param1": "value1", "new_param": "added"}),
        "last_status_obj_json": json.dumps(
            {"status": "complete", "message": "Finished! With update."}
        ),
        "service_used": "Spotify (Deezer Fallback)",
        "quality_profile": "HIGH",
        "convert_to": "flac",
        "bitrate": None,
    }
    add_entry_to_history(updated_data_complete)

@@ -310,13 +376,17 @@ if __name__ == '__main__':
        print(entry)

    print("\nFetching history entries (sorted by item_name ASC, limit 2, offset 1):")
    entries_sorted, total_sorted = get_history_entries(limit=2, offset=1, sort_by='item_name', sort_order='ASC')
    entries_sorted, total_sorted = get_history_entries(
        limit=2, offset=1, sort_by="item_name", sort_order="ASC"
    )
    print(f"Total entries (should be same as above): {total_sorted}")
    for entry in entries_sorted:
        print(entry)

    print("\nFetching history entries with filter (status_final = COMPLETED):")
    entries_filtered, total_filtered = get_history_entries(filters={'status_final': 'COMPLETED'})
    entries_filtered, total_filtered = get_history_entries(
        filters={"status_final": "COMPLETED"}
    )
    print(f"Total COMPLETED entries: {total_filtered}")
    for entry in entries_filtered:
        print(entry)
@@ -1,11 +1,9 @@
import os
import json
import traceback
from deezspot.spotloader import SpoLogin
from deezspot.deezloader import DeeLogin
from pathlib import Path
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds
from routes.utils.celery_config import get_config_params


def download_playlist(
    url,
@@ -23,51 +21,63 @@ def download_playlist(
    max_retries=3,
    progress_callback=None,
    convert_to=None,
    bitrate=None
    bitrate=None,
):
    try:
        # Detect URL source (Spotify or Deezer) from URL
        is_spotify_url = 'open.spotify.com' in url.lower()
        is_deezer_url = 'deezer.com' in url.lower()
        is_spotify_url = "open.spotify.com" in url.lower()
        is_deezer_url = "deezer.com" in url.lower()

        service = ''
        service = ""
        if is_spotify_url:
            service = 'spotify'
            service = "spotify"
        elif is_deezer_url:
            service = 'deezer'
            service = "deezer"
        else:
            error_msg = "Invalid URL: Must be from open.spotify.com or deezer.com"
            print(f"ERROR: {error_msg}")
            raise ValueError(error_msg)

        print(f"DEBUG: playlist.py - Service determined from URL: {service}")
        print(f"DEBUG: playlist.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'")
        print(
            f"DEBUG: playlist.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'"
        )

        # Get global Spotify API credentials
        global_spotify_client_id, global_spotify_client_secret = _get_global_spotify_api_creds()
        global_spotify_client_id, global_spotify_client_secret = (
            _get_global_spotify_api_creds()
        )
        if not global_spotify_client_id or not global_spotify_client_secret:
            warning_msg = "WARN: playlist.py - Global Spotify client_id/secret not found in search.json. Spotify operations will likely fail."
            print(warning_msg)

        if service == 'spotify':
        if service == "spotify":
            if fallback:  # Fallback is a Deezer account name for a Spotify URL
                if quality is None: quality = 'FLAC' # Deezer quality for first attempt
                if fall_quality is None: fall_quality = 'HIGH' # Spotify quality for fallback (if Deezer fails)
                if quality is None:
                    quality = "FLAC"  # Deezer quality for first attempt
                if fall_quality is None:
                    fall_quality = (
                        "HIGH"  # Spotify quality for fallback (if Deezer fails)
                    )

                deezer_error = None
                try:
                    # Attempt 1: Deezer via download_playlistspo (using 'fallback' as Deezer account name)
                    print(f"DEBUG: playlist.py - Spotify URL. Attempt 1: Deezer (account: {fallback})")
                    deezer_fallback_creds = get_credential('deezer', fallback)
                    arl = deezer_fallback_creds.get('arl')
                    print(
                        f"DEBUG: playlist.py - Spotify URL. Attempt 1: Deezer (account: {fallback})"
                    )
                    deezer_fallback_creds = get_credential("deezer", fallback)
                    arl = deezer_fallback_creds.get("arl")
                    if not arl:
                        raise ValueError(f"ARL not found for Deezer account '{fallback}'.")
                        raise ValueError(
                            f"ARL not found for Deezer account '{fallback}'."
                        )

                    dl = DeeLogin(
                        arl=arl,
                        spotify_client_id=global_spotify_client_id,
                        spotify_client_secret=global_spotify_client_secret,
                        progress_callback=progress_callback
                        progress_callback=progress_callback,
                    )
                    dl.download_playlistspo(
                        link_playlist=url,  # Spotify URL
@@ -85,30 +95,45 @@ def download_playlist(
                        retry_delay_increase=retry_delay_increase,
                        max_retries=max_retries,
                        convert_to=convert_to,
                        bitrate=bitrate
                        bitrate=bitrate,
                    )
                    print(
                        f"DEBUG: playlist.py - Playlist download via Deezer (account: {fallback}) successful for Spotify URL."
                    )
                    print(f"DEBUG: playlist.py - Playlist download via Deezer (account: {fallback}) successful for Spotify URL.")
                except Exception as e:
                    deezer_error = e
                    print(f"ERROR: playlist.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}")
                    print(
                        f"ERROR: playlist.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}"
                    )
                    traceback.print_exc()
                    print(f"DEBUG: playlist.py - Attempting Spotify direct download (account: {main} for blob)...")
                    print(
                        f"DEBUG: playlist.py - Attempting Spotify direct download (account: {main} for blob)..."
                    )

                    # Attempt 2: Spotify direct via download_playlist (using 'main' as Spotify account for blob)
                    try:
                        if not global_spotify_client_id or not global_spotify_client_secret:
                            raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.")
                        if (
                            not global_spotify_client_id
                            or not global_spotify_client_secret
                        ):
                            raise ValueError(
                                "Global Spotify API credentials (client_id/secret) not configured for Spotify download."
                            )

                        spotify_main_creds = get_credential('spotify', main) # For blob path
                        blob_file_path = spotify_main_creds.get('blob_file_path')
                        spotify_main_creds = get_credential(
                            "spotify", main
                        )  # For blob path
                        blob_file_path = spotify_main_creds.get("blob_file_path")
                        if not Path(blob_file_path).exists():
                            raise FileNotFoundError(f"Spotify credentials blob file not found at {blob_file_path} for account '{main}'")
                            raise FileNotFoundError(
                                f"Spotify credentials blob file not found at {blob_file_path} for account '{main}'"
                            )

                        spo = SpoLogin(
                            credentials_path=blob_file_path,
                            spotify_client_id=global_spotify_client_id,
                            spotify_client_secret=global_spotify_client_secret,
                            progress_callback=progress_callback
                            progress_callback=progress_callback,
                        )
                        spo.download_playlist(
                            link_playlist=url,  # Spotify URL
@@ -127,33 +152,44 @@ def download_playlist(
                            retry_delay_increase=retry_delay_increase,
                            max_retries=max_retries,
                            convert_to=convert_to,
                            bitrate=bitrate
                            bitrate=bitrate,
                        )
                        print(
                            f"DEBUG: playlist.py - Spotify direct download (account: {main} for blob) successful."
                        )
                        print(f"DEBUG: playlist.py - Spotify direct download (account: {main} for blob) successful.")
                    except Exception as e2:
                        print(f"ERROR: playlist.py - Spotify direct download (account: {main} for blob) also failed: {e2}")
                        print(
                            f"ERROR: playlist.py - Spotify direct download (account: {main} for blob) also failed: {e2}"
                        )
                        raise RuntimeError(
                            f"Both Deezer attempt (account: {fallback}) and Spotify direct (account: {main} for blob) failed. "
                            f"Deezer error: {deezer_error}, Spotify error: {e2}"
                        ) from e2
            else:
                # Spotify URL, no fallback. Direct Spotify download using 'main' (Spotify account for blob)
                if quality is None: quality = 'HIGH' # Default Spotify quality
                print(f"DEBUG: playlist.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}")
                if quality is None:
                    quality = "HIGH"  # Default Spotify quality
                print(
                    f"DEBUG: playlist.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}"
                )

                if not global_spotify_client_id or not global_spotify_client_secret:
                    raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.")
                    raise ValueError(
                        "Global Spotify API credentials (client_id/secret) not configured for Spotify download."
                    )

                spotify_main_creds = get_credential('spotify', main) # For blob path
                blob_file_path = spotify_main_creds.get('blob_file_path')
                spotify_main_creds = get_credential("spotify", main)  # For blob path
                blob_file_path = spotify_main_creds.get("blob_file_path")
                if not Path(blob_file_path).exists():
                    raise FileNotFoundError(f"Spotify credentials blob file not found at {blob_file_path} for account '{main}'")
                    raise FileNotFoundError(
                        f"Spotify credentials blob file not found at {blob_file_path} for account '{main}'"
                    )

                spo = SpoLogin(
                    credentials_path=blob_file_path,
                    spotify_client_id=global_spotify_client_id,
                    spotify_client_secret=global_spotify_client_secret,
                    progress_callback=progress_callback
                    progress_callback=progress_callback,
                )
                spo.download_playlist(
                    link_playlist=url,
@@ -172,16 +208,21 @@ def download_playlist(
                    retry_delay_increase=retry_delay_increase,
                    max_retries=max_retries,
                    convert_to=convert_to,
                    bitrate=bitrate
                    bitrate=bitrate,
                )
                print(
                    f"DEBUG: playlist.py - Direct Spotify download (account: {main} for blob) successful."
                )
                print(f"DEBUG: playlist.py - Direct Spotify download (account: {main} for blob) successful.")

        elif service == 'deezer':
        elif service == "deezer":
            # Deezer URL. Direct Deezer download using 'main' (Deezer account name for ARL)
            if quality is None: quality = 'FLAC' # Default Deezer quality
            print(f"DEBUG: playlist.py - Deezer URL. Direct download with Deezer account: {main}")
            deezer_main_creds = get_credential('deezer', main) # For ARL
            arl = deezer_main_creds.get('arl')
            if quality is None:
                quality = "FLAC"  # Default Deezer quality
            print(
                f"DEBUG: playlist.py - Deezer URL. Direct download with Deezer account: {main}"
            )
            deezer_main_creds = get_credential("deezer", main)  # For ARL
            arl = deezer_main_creds.get("arl")
            if not arl:
                raise ValueError(f"ARL not found for Deezer account '{main}'.")

@@ -189,7 +230,7 @@ def download_playlist(
                arl=arl,  # Account specific ARL
                spotify_client_id=global_spotify_client_id,  # Global Spotify keys
                spotify_client_secret=global_spotify_client_secret,  # Global Spotify keys
                progress_callback=progress_callback
                progress_callback=progress_callback,
            )
            dl.download_playlistdee(  # Deezer URL, download via Deezer
                link_playlist=url,
@@ -206,9 +247,11 @@ def download_playlist(
                retry_delay_increase=retry_delay_increase,
                max_retries=max_retries,
                convert_to=convert_to,
                bitrate=bitrate
                bitrate=bitrate,
            )
            print(
                f"DEBUG: playlist.py - Direct Deezer download (account: {main}) successful."
            )
            print(f"DEBUG: playlist.py - Direct Deezer download (account: {main}) successful.")
        else:
            # Should be caught by initial service check, but as a safeguard
            raise ValueError(f"Unsupported service determined: {service}")

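The control flow above is a two-stage fallback: for a Spotify URL, try Deezer first, and only on failure fall back to a direct Spotify download, chaining both errors if everything fails. Stripped of the deezspot specifics, the shape is roughly this (the handler names are placeholders, not the app's API):

def download_with_fallback(url, primary, secondary):
    primary_error = None
    try:
        return primary(url)
    except Exception as e:
        primary_error = e
    try:
        return secondary(url)
    except Exception as e2:
        # Surface both failures so the task log shows the full story.
        raise RuntimeError(
            f"Primary error: {primary_error}, secondary error: {e2}"
        ) from e2
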
@@ -1,50 +1,58 @@
from deezspot.easy_spoty import Spo
import json
from pathlib import Path
import logging
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds

# Configure logger
logger = logging.getLogger(__name__)

def search(
    query: str,
    search_type: str,
    limit: int = 3,
    main: str = None
) -> dict:
    logger.info(f"Search requested: query='{query}', type={search_type}, limit={limit}, main_account_name={main}")

def search(query: str, search_type: str, limit: int = 3, main: str = None) -> dict:
    logger.info(
        f"Search requested: query='{query}', type={search_type}, limit={limit}, main_account_name={main}"
    )

    client_id, client_secret = _get_global_spotify_api_creds()

    if not client_id or not client_secret:
        logger.error("Global Spotify API client_id or client_secret not configured in ./data/creds/search.json.")
        raise ValueError("Spotify API credentials are not configured globally for search.")
        logger.error(
            "Global Spotify API client_id or client_secret not configured in ./data/creds/search.json."
        )
        raise ValueError(
            "Spotify API credentials are not configured globally for search."
        )

    if main:
        logger.debug(f"Spotify account context '{main}' was provided for search. API keys are global, but this account might be used for other context by Spo if relevant.")
        logger.debug(
            f"Spotify account context '{main}' was provided for search. API keys are global, but this account might be used for other context by Spo if relevant."
        )
        try:
            get_credential('spotify', main)
            get_credential("spotify", main)
            logger.debug(f"Spotify account '{main}' exists.")
        except FileNotFoundError:
            logger.warning(f"Spotify account '{main}' provided for search context not found in credentials. Search will proceed with global API keys.")
            logger.warning(
                f"Spotify account '{main}' provided for search context not found in credentials. Search will proceed with global API keys."
            )
        except Exception as e:
            logger.warning(f"Error checking existence of Spotify account '{main}': {e}. Search will proceed with global API keys.")
            logger.warning(
                f"Error checking existence of Spotify account '{main}': {e}. Search will proceed with global API keys."
            )
    else:
        logger.debug("No specific 'main' account context provided for search. Using global API keys.")
        logger.debug(
            "No specific 'main' account context provided for search. Using global API keys."
        )

    logger.debug(f"Initializing Spotify client with global API credentials for search.")
    logger.debug("Initializing Spotify client with global API credentials for search.")
    Spo.__init__(client_id, client_secret)

    logger.debug(f"Executing Spotify search with query='{query}', type={search_type}, limit={limit}")
    try:
        spotify_response = Spo.search(
            query=query,
            search_type=search_type,
            limit=limit
    logger.debug(
        f"Executing Spotify search with query='{query}', type={search_type}, limit={limit}"
    )
    try:
        spotify_response = Spo.search(query=query, search_type=search_type, limit=limit)
        logger.info(f"Search completed successfully for query: '{query}'")
        return spotify_response
    except Exception as e:
        logger.error(f"Error during Spotify search for query '{query}': {e}", exc_info=True)
        logger.error(
            f"Error during Spotify search for query '{query}': {e}", exc_info=True
        )
        raise

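For reference, calling the reformatted helper is unchanged by the formatting pass. A typical invocation might look like the following, assuming the raw Spotify search payload is returned unchanged (the query and account name here are made up):

results = search(query="Daft Punk", search_type="track", limit=5, main="my_account")
# Spotify's search response nests results under the plural of the search type.
for item in results.get("tracks", {}).get("items", []):
    print(item["name"])
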
@@ -1,11 +1,12 @@
import os
import json
import traceback
from deezspot.spotloader import SpoLogin
from deezspot.deezloader import DeeLogin
from pathlib import Path
from routes.utils.credentials import get_credential, _get_global_spotify_api_creds, get_spotify_blob_path
from routes.utils.celery_config import get_config_params
from routes.utils.credentials import (
    get_credential,
    _get_global_spotify_api_creds,
    get_spotify_blob_path,
)


def download_track(
    url,
@@ -23,28 +24,32 @@ def download_track(
    max_retries=3,
    progress_callback=None,
    convert_to=None,
    bitrate=None
    bitrate=None,
):
    try:
        # Detect URL source (Spotify or Deezer) from URL
        is_spotify_url = 'open.spotify.com' in url.lower()
        is_deezer_url = 'deezer.com' in url.lower()
        is_spotify_url = "open.spotify.com" in url.lower()
        is_deezer_url = "deezer.com" in url.lower()

        service = ''
        service = ""
        if is_spotify_url:
            service = 'spotify'
            service = "spotify"
        elif is_deezer_url:
            service = 'deezer'
            service = "deezer"
        else:
            error_msg = "Invalid URL: Must be from open.spotify.com or deezer.com"
            print(f"ERROR: {error_msg}")
            raise ValueError(error_msg)

        print(f"DEBUG: track.py - Service determined from URL: {service}")
        print(f"DEBUG: track.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'")
        print(
            f"DEBUG: track.py - Credentials provided: main_account_name='{main}', fallback_account_name='{fallback}'"
        )

        # Get global Spotify API credentials for SpoLogin and DeeLogin (if it uses Spotify search)
        global_spotify_client_id, global_spotify_client_secret = _get_global_spotify_api_creds()
        global_spotify_client_id, global_spotify_client_secret = (
            _get_global_spotify_api_creds()
        )
        if not global_spotify_client_id or not global_spotify_client_secret:
            # This is a critical failure if Spotify operations are involved
            warning_msg = "WARN: track.py - Global Spotify client_id/secret not found in search.json. Spotify operations will likely fail."
@@ -52,25 +57,33 @@ def download_track(
            # Depending on flow, might want to raise error here if service is 'spotify'
            # For now, let it proceed and fail at SpoLogin/DeeLogin init if keys are truly needed and missing.

        if service == 'spotify':
        if service == "spotify":
            if fallback:  # Fallback is a Deezer account name for a Spotify URL
                if quality is None: quality = 'FLAC' # Deezer quality for first attempt
                if fall_quality is None: fall_quality = 'HIGH' # Spotify quality for fallback (if Deezer fails)
                if quality is None:
                    quality = "FLAC"  # Deezer quality for first attempt
                if fall_quality is None:
                    fall_quality = (
                        "HIGH"  # Spotify quality for fallback (if Deezer fails)
                    )

                deezer_error = None
                try:
                    # Attempt 1: Deezer via download_trackspo (using 'fallback' as Deezer account name)
                    print(f"DEBUG: track.py - Spotify URL. Attempt 1: Deezer (account: {fallback})")
                    deezer_fallback_creds = get_credential('deezer', fallback)
                    arl = deezer_fallback_creds.get('arl')
                    print(
                        f"DEBUG: track.py - Spotify URL. Attempt 1: Deezer (account: {fallback})"
                    )
                    deezer_fallback_creds = get_credential("deezer", fallback)
                    arl = deezer_fallback_creds.get("arl")
                    if not arl:
                        raise ValueError(f"ARL not found for Deezer account '{fallback}'.")
                        raise ValueError(
                            f"ARL not found for Deezer account '{fallback}'."
                        )

                    dl = DeeLogin(
                        arl=arl,
                        spotify_client_id=global_spotify_client_id,  # Global creds
                        spotify_client_secret=global_spotify_client_secret,  # Global creds
                        progress_callback=progress_callback
                        progress_callback=progress_callback,
                    )
                    # download_trackspo means: Spotify URL, download via Deezer
                    dl.download_trackspo(
@@ -87,30 +100,47 @@ def download_track(
                        retry_delay_increase=retry_delay_increase,
                        max_retries=max_retries,
                        convert_to=convert_to,
                        bitrate=bitrate
                        bitrate=bitrate,
                    )
                    print(
                        f"DEBUG: track.py - Track download via Deezer (account: {fallback}) successful for Spotify URL."
                    )
                    print(f"DEBUG: track.py - Track download via Deezer (account: {fallback}) successful for Spotify URL.")
                except Exception as e:
                    deezer_error = e
                    print(f"ERROR: track.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}")
                    print(
                        f"ERROR: track.py - Deezer attempt (account: {fallback}) for Spotify URL failed: {e}"
                    )
                    traceback.print_exc()
                    print(f"DEBUG: track.py - Attempting Spotify direct download (account: {main})...")
                    print(
                        f"DEBUG: track.py - Attempting Spotify direct download (account: {main})..."
                    )

                    # Attempt 2: Spotify direct via download_track (using 'main' as Spotify account for blob)
                    try:
                        if not global_spotify_client_id or not global_spotify_client_secret:
                            raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.")
                        if (
                            not global_spotify_client_id
                            or not global_spotify_client_secret
                        ):
                            raise ValueError(
                                "Global Spotify API credentials (client_id/secret) not configured for Spotify download."
                            )

                        # Use get_spotify_blob_path directly
                        blob_file_path = get_spotify_blob_path(main)
                        if not blob_file_path.exists(): # Check existence on the Path object
                            raise FileNotFoundError(f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'")
                        if (
                            not blob_file_path.exists()
                        ):  # Check existence on the Path object
                            raise FileNotFoundError(
                                f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'"
                            )

                        spo = SpoLogin(
                            credentials_path=str(blob_file_path), # Account specific blob
                            credentials_path=str(
                                blob_file_path
                            ),  # Account specific blob
                            spotify_client_id=global_spotify_client_id,  # Global API keys
                            spotify_client_secret=global_spotify_client_secret,  # Global API keys
                            progress_callback=progress_callback
                            progress_callback=progress_callback,
                        )
                        spo.download_track(
                            link_track=url,  # Spotify URL
@@ -128,33 +158,44 @@ def download_track(
                            retry_delay_increase=retry_delay_increase,
                            max_retries=max_retries,
                            convert_to=convert_to,
                            bitrate=bitrate
                            bitrate=bitrate,
                        )
                        print(
                            f"DEBUG: track.py - Spotify direct download (account: {main} for blob) successful."
                        )
                        print(f"DEBUG: track.py - Spotify direct download (account: {main} for blob) successful.")
                    except Exception as e2:
                        print(f"ERROR: track.py - Spotify direct download (account: {main} for blob) also failed: {e2}")
                        print(
                            f"ERROR: track.py - Spotify direct download (account: {main} for blob) also failed: {e2}"
                        )
                        raise RuntimeError(
                            f"Both Deezer attempt (account: {fallback}) and Spotify direct (account: {main} for blob) failed. "
                            f"Deezer error: {deezer_error}, Spotify error: {e2}"
                        ) from e2
            else:
                # Spotify URL, no fallback. Direct Spotify download using 'main' (Spotify account for blob)
                if quality is None: quality = 'HIGH' # Default Spotify quality
                print(f"DEBUG: track.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}")
                if quality is None:
                    quality = "HIGH"  # Default Spotify quality
                print(
                    f"DEBUG: track.py - Spotify URL, no fallback. Direct download with Spotify account (for blob): {main}"
                )

                if not global_spotify_client_id or not global_spotify_client_secret:
                    raise ValueError("Global Spotify API credentials (client_id/secret) not configured for Spotify download.")
                    raise ValueError(
                        "Global Spotify API credentials (client_id/secret) not configured for Spotify download."
                    )

                # Use get_spotify_blob_path directly
                blob_file_path = get_spotify_blob_path(main)
                if not blob_file_path.exists(): # Check existence on the Path object
                    raise FileNotFoundError(f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'")
                    raise FileNotFoundError(
                        f"Spotify credentials blob file not found at {str(blob_file_path)} for account '{main}'"
                    )

                spo = SpoLogin(
                    credentials_path=str(blob_file_path),  # Account specific blob
                    spotify_client_id=global_spotify_client_id,  # Global API keys
                    spotify_client_secret=global_spotify_client_secret,  # Global API keys
                    progress_callback=progress_callback
                    progress_callback=progress_callback,
                )
                spo.download_track(
                    link_track=url,
@@ -172,16 +213,21 @@ def download_track(
                    retry_delay_increase=retry_delay_increase,
                    max_retries=max_retries,
                    convert_to=convert_to,
                    bitrate=bitrate
                    bitrate=bitrate,
                )
                print(
                    f"DEBUG: track.py - Direct Spotify download (account: {main} for blob) successful."
                )
                print(f"DEBUG: track.py - Direct Spotify download (account: {main} for blob) successful.")

        elif service == 'deezer':
        elif service == "deezer":
            # Deezer URL. Direct Deezer download using 'main' (Deezer account name for ARL)
            if quality is None: quality = 'FLAC' # Default Deezer quality
            print(f"DEBUG: track.py - Deezer URL. Direct download with Deezer account: {main}")
            deezer_main_creds = get_credential('deezer', main) # For ARL
            arl = deezer_main_creds.get('arl')
            if quality is None:
                quality = "FLAC"  # Default Deezer quality
            print(
                f"DEBUG: track.py - Deezer URL. Direct download with Deezer account: {main}"
            )
            deezer_main_creds = get_credential("deezer", main)  # For ARL
            arl = deezer_main_creds.get("arl")
            if not arl:
                raise ValueError(f"ARL not found for Deezer account '{main}'.")

@@ -189,7 +235,7 @@ def download_track(
                arl=arl,  # Account specific ARL
                spotify_client_id=global_spotify_client_id,  # Global Spotify keys for internal Spo use by DeeLogin
                spotify_client_secret=global_spotify_client_secret,  # Global Spotify keys
                progress_callback=progress_callback
                progress_callback=progress_callback,
            )
            dl.download_trackdee(  # Deezer URL, download via Deezer
                link_track=url,
@@ -205,12 +251,14 @@ def download_track(
                retry_delay_increase=retry_delay_increase,
                max_retries=max_retries,
                convert_to=convert_to,
                bitrate=bitrate
                bitrate=bitrate,
            )
            print(
                f"DEBUG: track.py - Direct Deezer download (account: {main}) successful."
            )
            print(f"DEBUG: track.py - Direct Deezer download (account: {main}) successful.")
        else:
            # Should be caught by initial service check, but as a safeguard
            raise ValueError(f"Unsupported service determined: {service}")
    except Exception as e:
    except Exception:
        traceback.print_exc()
        raise

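track.py detects the service by substring matching on the lowercased URL. An equivalent, slightly stricter check could go through urllib.parse and compare hostnames instead; this is shown only as an illustration of the idea, not what the code above does:

from urllib.parse import urlparse

def detect_service(url: str) -> str:
    host = urlparse(url).netloc.lower()
    if host.endswith("open.spotify.com"):
        return "spotify"
    if host.endswith("deezer.com"):
        return "deezer"
    raise ValueError("Invalid URL: Must be from open.spotify.com or deezer.com")
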
File diff suppressed because it is too large
@@ -3,6 +3,7 @@ import threading
import logging
import json
from pathlib import Path
from typing import Any, List, Dict

from routes.utils.watch.db import (
    get_watched_playlists,
@@ -12,18 +13,18 @@ from routes.utils.watch.db import (
    update_playlist_snapshot,
    mark_tracks_as_not_present_in_spotify,
    # Artist watch DB functions
    init_artists_db,
    get_watched_artists,
    get_watched_artist,
    get_artist_album_ids_from_db,
    add_or_update_album_for_artist,  # Renamed from add_album_to_artist_db
    update_artist_metadata_after_check # Renamed from update_artist_metadata
    update_artist_metadata_after_check,  # Renamed from update_artist_metadata
)
from routes.utils.get_info import get_spotify_info # To fetch playlist, track, artist, and album details
from routes.utils.get_info import (
    get_spotify_info,
)  # To fetch playlist, track, artist, and album details
from routes.utils.celery_queue_manager import download_queue_manager

logger = logging.getLogger(__name__)
CONFIG_FILE_PATH = Path('./data/config/watch.json')
CONFIG_FILE_PATH = Path("./data/config/watch.json")
STOP_EVENT = threading.Event()

DEFAULT_WATCH_CONFIG = {
@@ -32,9 +33,10 @@ DEFAULT_WATCH_CONFIG = {
    "max_tracks_per_run": 50,  # For playlists
    "watchedArtistAlbumGroup": ["album", "single"],  # Default for artists
    "delay_between_playlists_seconds": 2,
    "delay_between_artists_seconds": 5 # Added for artists
    "delay_between_artists_seconds": 5,  # Added for artists
}


def get_watch_config():
    """Loads the watch configuration from watch.json.
    Creates the file with defaults if it doesn't exist.
@@ -45,12 +47,14 @@ def get_watch_config():
        CONFIG_FILE_PATH.parent.mkdir(parents=True, exist_ok=True)

        if not CONFIG_FILE_PATH.exists():
            logger.info(f"{CONFIG_FILE_PATH} not found. Creating with default watch config.")
            with open(CONFIG_FILE_PATH, 'w') as f:
            logger.info(
                f"{CONFIG_FILE_PATH} not found. Creating with default watch config."
            )
            with open(CONFIG_FILE_PATH, "w") as f:
                json.dump(DEFAULT_WATCH_CONFIG, f, indent=2)
            return DEFAULT_WATCH_CONFIG.copy()

        with open(CONFIG_FILE_PATH, 'r') as f:
        with open(CONFIG_FILE_PATH, "r") as f:
            config = json.load(f)

        updated = False
@@ -60,28 +64,39 @@ def get_watch_config():
                updated = True

        if updated:
            logger.info(f"Watch configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults.")
            with open(CONFIG_FILE_PATH, 'w') as f:
            logger.info(
                f"Watch configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults."
            )
            with open(CONFIG_FILE_PATH, "w") as f:
                json.dump(config, f, indent=2)
        return config
    except Exception as e:
        logger.error(f"Error loading or creating watch config at {CONFIG_FILE_PATH}: {e}", exc_info=True)
        logger.error(
            f"Error loading or creating watch config at {CONFIG_FILE_PATH}: {e}",
            exc_info=True,
        )
        return DEFAULT_WATCH_CONFIG.copy()  # Fallback

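get_watch_config backfills missing keys so older watch.json files keep working after new settings are introduced. The merge step elided by the hunk above (between `updated = False` and the `if updated:` write-back) is, by all indications, essentially the following sketch:

# Assumed shape of the elided merge loop, given `config` was just loaded from disk:
for key, default_value in DEFAULT_WATCH_CONFIG.items():
    if key not in config:
        config[key] = default_value
        updated = True
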
def construct_spotify_url(item_id, item_type="track"):
    return f"https://open.spotify.com/{item_type}/{item_id}"


def check_watched_playlists(specific_playlist_id: str = None):
    """Checks watched playlists for new tracks and queues downloads.
    If specific_playlist_id is provided, only that playlist is checked.
    """
    logger.info(f"Playlist Watch Manager: Starting check. Specific playlist: {specific_playlist_id or 'All'}")
    logger.info(
        f"Playlist Watch Manager: Starting check. Specific playlist: {specific_playlist_id or 'All'}"
    )
    config = get_watch_config()

    if specific_playlist_id:
        playlist_obj = get_watched_playlist(specific_playlist_id)
        if not playlist_obj:
            logger.error(f"Playlist Watch Manager: Playlist {specific_playlist_id} not found in watch database.")
            logger.error(
                f"Playlist Watch Manager: Playlist {specific_playlist_id} not found in watch database."
            )
            return
        watched_playlists_to_check = [playlist_obj]
    else:
@@ -92,19 +107,30 @@ def check_watched_playlists(specific_playlist_id: str = None):
        return

    for playlist_in_db in watched_playlists_to_check:
        playlist_spotify_id = playlist_in_db['spotify_id']
        playlist_name = playlist_in_db['name']
        logger.info(f"Playlist Watch Manager: Checking playlist '{playlist_name}' ({playlist_spotify_id})...")
        playlist_spotify_id = playlist_in_db["spotify_id"]
        playlist_name = playlist_in_db["name"]
        logger.info(
            f"Playlist Watch Manager: Checking playlist '{playlist_name}' ({playlist_spotify_id})..."
        )

        try:
            # For playlists, we fetch all tracks in one go usually (Spotify API limit permitting)
            current_playlist_data_from_api = get_spotify_info(playlist_spotify_id, "playlist")
            if not current_playlist_data_from_api or 'tracks' not in current_playlist_data_from_api:
                logger.error(f"Playlist Watch Manager: Failed to fetch data or tracks from Spotify for playlist {playlist_spotify_id}.")
            current_playlist_data_from_api = get_spotify_info(
                playlist_spotify_id, "playlist"
            )
            if (
                not current_playlist_data_from_api
                or "tracks" not in current_playlist_data_from_api
            ):
                logger.error(
                    f"Playlist Watch Manager: Failed to fetch data or tracks from Spotify for playlist {playlist_spotify_id}."
                )
                continue

            api_snapshot_id = current_playlist_data_from_api.get('snapshot_id')
            api_total_tracks = current_playlist_data_from_api.get('tracks', {}).get('total', 0)
            api_snapshot_id = current_playlist_data_from_api.get("snapshot_id")
            api_total_tracks = current_playlist_data_from_api.get("tracks", {}).get(
                "total", 0
            )

            # Paginate through playlist tracks if necessary
            all_api_track_items = []
@@ -121,16 +147,21 @@ def check_watched_playlists(specific_playlist_id: str = None):
                # If it doesn't, this part would need adjustment for robust pagination.
                # For now, we use the items from the initial fetch.

                paginated_playlist_data = get_spotify_info(playlist_spotify_id, "playlist", offset=offset, limit=limit)
                if not paginated_playlist_data or 'tracks' not in paginated_playlist_data:
                paginated_playlist_data = get_spotify_info(
                    playlist_spotify_id, "playlist", offset=offset, limit=limit
                )
                if (
                    not paginated_playlist_data
                    or "tracks" not in paginated_playlist_data
                ):
                    break

                page_items = paginated_playlist_data.get('tracks', {}).get('items', [])
                page_items = paginated_playlist_data.get("tracks", {}).get("items", [])
                if not page_items:
                    break
                all_api_track_items.extend(page_items)

                if paginated_playlist_data.get('tracks', {}).get('next'):
                if paginated_playlist_data.get("tracks", {}).get("next"):
                    offset += limit
                else:
                    break
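The loop above implements standard Spotify offset pagination: keep requesting pages of `limit` items and stop when the payload's `next` URL is null. The skeleton of that loop, written independently of get_spotify_info's exact signature and of the nested "tracks" wrapper:

def fetch_all_items(fetch_page, limit=50):
    """fetch_page(offset, limit) -> a Spotify-style paging object, or None."""
    items, offset = [], 0
    while True:
        page = fetch_page(offset, limit)
        if not page or not page.get("items"):
            break
        items.extend(page["items"])
        if page.get("next"):  # URL of the next page, or None on the last page
            offset += limit
        else:
            break
    return items
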
@@ -138,9 +169,9 @@ def check_watched_playlists(specific_playlist_id: str = None):
            current_api_track_ids = set()
            api_track_id_to_item_map = {}
            for item in all_api_track_items:  # Use all_api_track_items
                track = item.get('track')
                if track and track.get('id') and not track.get('is_local'):
                    track_id = track['id']
                track = item.get("track")
                if track and track.get("id") and not track.get("is_local"):
                    track_id = track["id"]
                    current_api_track_ids.add(track_id)
                    api_track_id_to_item_map[track_id] = item

@@ -149,74 +180,118 @@ def check_watched_playlists(specific_playlist_id: str = None):
            new_track_ids_for_download = current_api_track_ids - db_track_ids
            queued_for_download_count = 0
            if new_track_ids_for_download:
                logger.info(f"Playlist Watch Manager: Found {len(new_track_ids_for_download)} new tracks for playlist '{playlist_name}' to download.")
                logger.info(
                    f"Playlist Watch Manager: Found {len(new_track_ids_for_download)} new tracks for playlist '{playlist_name}' to download."
                )
                for track_id in new_track_ids_for_download:
                    api_item = api_track_id_to_item_map.get(track_id)
                    if not api_item or not api_item.get("track"):
                        logger.warning(f"Playlist Watch Manager: Missing track details in API map for new track_id {track_id} in playlist {playlist_spotify_id}. Cannot queue.")
                        logger.warning(
                            f"Playlist Watch Manager: Missing track details in API map for new track_id {track_id} in playlist {playlist_spotify_id}. Cannot queue."
                        )
                        continue

                    track_to_queue = api_item["track"]
                    task_payload = {
                        "download_type": "track",
                        "url": construct_spotify_url(track_id, "track"),
                        "name": track_to_queue.get('name', 'Unknown Track'),
                        "artist": ", ".join([a['name'] for a in track_to_queue.get('artists', []) if a.get('name')]),
                        "name": track_to_queue.get("name", "Unknown Track"),
                        "artist": ", ".join(
                            [
                                a["name"]
                                for a in track_to_queue.get("artists", [])
                                if a.get("name")
                            ]
                        ),
                        "orig_request": {
                            "source": "playlist_watch",
                            "playlist_id": playlist_spotify_id,
                            "playlist_name": playlist_name,
                            "track_spotify_id": track_id,
                            "track_item_for_db": api_item # Pass full API item for DB update on completion
                        }
                            "track_item_for_db": api_item,  # Pass full API item for DB update on completion
                        },
                        # "track_details_for_db" was old name, using track_item_for_db consistent with celery_tasks
                    }
                    try:
                        task_id_or_none = download_queue_manager.add_task(task_payload, from_watch_job=True)
                        task_id_or_none = download_queue_manager.add_task(
                            task_payload, from_watch_job=True
                        )
                        if task_id_or_none:  # Task was newly queued
                            logger.info(f"Playlist Watch Manager: Queued download task {task_id_or_none} for new track {track_id} ('{track_to_queue.get('name')}') from playlist '{playlist_name}'.")
                            logger.info(
                                f"Playlist Watch Manager: Queued download task {task_id_or_none} for new track {track_id} ('{track_to_queue.get('name')}') from playlist '{playlist_name}'."
                            )
                            queued_for_download_count += 1
                        # If task_id_or_none is None, it was a duplicate and not re-queued, Celery manager handles logging.
                    except Exception as e:
                        logger.error(f"Playlist Watch Manager: Failed to queue download for new track {track_id} from playlist '{playlist_name}': {e}", exc_info=True)
                logger.info(f"Playlist Watch Manager: Attempted to queue {queued_for_download_count} new tracks for playlist '{playlist_name}'.")
                        logger.error(
                            f"Playlist Watch Manager: Failed to queue download for new track {track_id} from playlist '{playlist_name}': {e}",
                            exc_info=True,
                        )
                logger.info(
                    f"Playlist Watch Manager: Attempted to queue {queued_for_download_count} new tracks for playlist '{playlist_name}'."
                )
            else:
                logger.info(f"Playlist Watch Manager: No new tracks to download for playlist '{playlist_name}'.")
                logger.info(
                    f"Playlist Watch Manager: No new tracks to download for playlist '{playlist_name}'."
                )

            # Update DB for tracks that are still present in API (e.g. update 'last_seen_in_spotify')
            # add_tracks_to_playlist_db handles INSERT OR REPLACE, updating existing entries.
            # We should pass all current API tracks to ensure their `last_seen_in_spotify` and `is_present_in_spotify` are updated.
            if all_api_track_items: # If there are any tracks in the API for this playlist
                logger.info(f"Playlist Watch Manager: Refreshing {len(all_api_track_items)} tracks from API in local DB for playlist '{playlist_name}'.")
            if (
                all_api_track_items
            ):  # If there are any tracks in the API for this playlist
                logger.info(
                    f"Playlist Watch Manager: Refreshing {len(all_api_track_items)} tracks from API in local DB for playlist '{playlist_name}'."
                )
                add_tracks_to_playlist_db(playlist_spotify_id, all_api_track_items)

            removed_db_ids = db_track_ids - current_api_track_ids
            if removed_db_ids:
                logger.info(f"Playlist Watch Manager: {len(removed_db_ids)} tracks removed from Spotify playlist '{playlist_name}'. Marking in DB.")
                mark_tracks_as_not_present_in_spotify(playlist_spotify_id, list(removed_db_ids))
                logger.info(
                    f"Playlist Watch Manager: {len(removed_db_ids)} tracks removed from Spotify playlist '{playlist_name}'. Marking in DB."
                )
                mark_tracks_as_not_present_in_spotify(
                    playlist_spotify_id, list(removed_db_ids)
                )

            update_playlist_snapshot(playlist_spotify_id, api_snapshot_id, api_total_tracks) # api_total_tracks from initial fetch
            logger.info(f"Playlist Watch Manager: Finished checking playlist '{playlist_name}'. Snapshot ID updated. API Total Tracks: {api_total_tracks}.")
            update_playlist_snapshot(
                playlist_spotify_id, api_snapshot_id, api_total_tracks
            )  # api_total_tracks from initial fetch
            logger.info(
                f"Playlist Watch Manager: Finished checking playlist '{playlist_name}'. Snapshot ID updated. API Total Tracks: {api_total_tracks}."
            )

        except Exception as e:
            logger.error(f"Playlist Watch Manager: Error processing playlist {playlist_spotify_id}: {e}", exc_info=True)
            logger.error(
                f"Playlist Watch Manager: Error processing playlist {playlist_spotify_id}: {e}",
                exc_info=True,
            )

        time.sleep(max(1, config.get("delay_between_playlists_seconds", 2)))

    logger.info("Playlist Watch Manager: Finished checking all watched playlists.")

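Both watch checkers reduce change detection to set arithmetic on Spotify IDs: new items are API-minus-DB, removed items are DB-minus-API. In miniature:

api_ids = {"t1", "t2", "t3"}   # IDs currently in the Spotify playlist
db_ids = {"t2", "t3", "t4"}    # IDs recorded locally

new_ids = api_ids - db_ids      # {"t1"}  -> queue for download
removed_ids = db_ids - api_ids  # {"t4"}  -> mark as no longer present
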
def check_watched_artists(specific_artist_id: str = None):
|
||||
"""Checks watched artists for new albums and queues downloads."""
|
||||
logger.info(f"Artist Watch Manager: Starting check. Specific artist: {specific_artist_id or 'All'}")
|
||||
logger.info(
|
||||
f"Artist Watch Manager: Starting check. Specific artist: {specific_artist_id or 'All'}"
|
||||
)
|
||||
config = get_watch_config()
|
||||
watched_album_groups = [g.lower() for g in config.get("watchedArtistAlbumGroup", ["album", "single"])]
|
||||
logger.info(f"Artist Watch Manager: Watching for album groups: {watched_album_groups}")
|
||||
watched_album_groups = [
|
||||
g.lower() for g in config.get("watchedArtistAlbumGroup", ["album", "single"])
|
||||
]
|
||||
logger.info(
|
||||
f"Artist Watch Manager: Watching for album groups: {watched_album_groups}"
|
||||
)
|
||||
|
||||
if specific_artist_id:
|
||||
artist_obj_in_db = get_watched_artist(specific_artist_id)
|
||||
if not artist_obj_in_db:
|
||||
logger.error(f"Artist Watch Manager: Artist {specific_artist_id} not found in watch database.")
|
||||
logger.error(
|
||||
f"Artist Watch Manager: Artist {specific_artist_id} not found in watch database."
|
||||
)
|
||||
return
|
||||
artists_to_check = [artist_obj_in_db]
|
||||
else:
|
||||
@@ -227,9 +302,11 @@ def check_watched_artists(specific_artist_id: str = None):
|
||||
return
|
||||
|
||||
for artist_in_db in artists_to_check:
|
||||
artist_spotify_id = artist_in_db['spotify_id']
|
||||
artist_name = artist_in_db['name']
|
||||
logger.info(f"Artist Watch Manager: Checking artist '{artist_name}' ({artist_spotify_id})...")
|
||||
artist_spotify_id = artist_in_db["spotify_id"]
|
||||
artist_name = artist_in_db["name"]
|
||||
logger.info(
|
||||
f"Artist Watch Manager: Checking artist '{artist_name}' ({artist_spotify_id})..."
|
||||
)
|
||||
|
||||
try:
|
||||
# Spotify API for artist albums is paginated.
|
||||
@@ -237,58 +314,84 @@ def check_watched_artists(specific_artist_id: str = None):
|
||||
# Let's assume get_spotify_info(artist_id, 'artist-albums') returns a list of all album objects.
|
||||
# Or we implement pagination here.
|
||||
|
||||
all_artist_albums_from_api = []
|
||||
all_artist_albums_from_api: List[Dict[str, Any]] = []
|
||||
offset = 0
|
||||
limit = 50 # Spotify API limit for artist albums
|
||||
while True:
|
||||
# The 'artist-albums' type for get_spotify_info needs to support pagination params.
|
||||
# And return a list of album objects.
|
||||
logger.debug(f"Artist Watch Manager: Fetching albums for {artist_spotify_id}. Limit: {limit}, Offset: {offset}")
|
||||
artist_albums_page = get_spotify_info(artist_spotify_id, "artist_discography", limit=limit, offset=offset)
|
||||
logger.debug(
|
||||
f"Artist Watch Manager: Fetching albums for {artist_spotify_id}. Limit: {limit}, Offset: {offset}"
|
||||
)
|
||||
artist_albums_page = get_spotify_info(
|
||||
artist_spotify_id, "artist_discography", limit=limit, offset=offset
|
||||
)
|
||||
|
||||
if not artist_albums_page or not isinstance(artist_albums_page.get('items'), list):
|
||||
logger.warning(f"Artist Watch Manager: No album items found or invalid format for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Response: {artist_albums_page}")
|
||||
if not artist_albums_page or not isinstance(
|
||||
artist_albums_page.get("items"), list
|
||||
):
|
||||
logger.warning(
|
||||
f"Artist Watch Manager: No album items found or invalid format for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Response: {artist_albums_page}"
|
||||
)
|
||||
break
|
||||
|
||||
current_page_albums = artist_albums_page.get('items', [])
|
||||
current_page_albums = artist_albums_page.get("items", [])
|
||||
if not current_page_albums:
|
||||
logger.info(f"Artist Watch Manager: No more albums on page for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Total fetched so far: {len(all_artist_albums_from_api)}.")
|
||||
logger.info(
|
||||
f"Artist Watch Manager: No more albums on page for artist {artist_spotify_id} (name: '{artist_name}') at offset {offset}. Total fetched so far: {len(all_artist_albums_from_api)}."
|
||||
)
|
||||
break
|
||||
|
||||
logger.debug(f"Artist Watch Manager: Fetched {len(current_page_albums)} albums on current page for artist '{artist_name}'.")
|
||||
logger.debug(
|
||||
f"Artist Watch Manager: Fetched {len(current_page_albums)} albums on current page for artist '{artist_name}'."
|
||||
)
|
||||
all_artist_albums_from_api.extend(current_page_albums)
|
||||
|
||||
# Correct pagination: Check if Spotify indicates a next page URL
|
||||
# The `next` field in Spotify API responses is a URL to the next page or null.
|
||||
if artist_albums_page.get('next'):
|
||||
if artist_albums_page.get("next"):
|
||||
offset += limit # CORRECT: Increment offset by the limit used for the request
|
||||
else:
|
||||
logger.info(f"Artist Watch Manager: No 'next' page URL for artist '{artist_name}'. Pagination complete. Total albums fetched: {len(all_artist_albums_from_api)}.")
|
||||
logger.info(
|
||||
f"Artist Watch Manager: No 'next' page URL for artist '{artist_name}'. Pagination complete. Total albums fetched: {len(all_artist_albums_from_api)}."
|
||||
)
|
||||
break
|
||||
|
||||
             # total_albums_from_api = len(all_artist_albums_from_api)
             # Use the 'total' field from the API response for a more accurate count of all available albums (matching current API filter if any)
-            api_reported_total_albums = artist_albums_page.get('total', 0) if 'artist_albums_page' in locals() and artist_albums_page else len(all_artist_albums_from_api)
-            logger.info(f"Artist Watch Manager: Fetched {len(all_artist_albums_from_api)} albums in total from API for artist '{artist_name}'. API reports total: {api_reported_total_albums}.")
+            api_reported_total_albums = (
+                artist_albums_page.get("total", 0)
+                if "artist_albums_page" in locals() and artist_albums_page
+                else len(all_artist_albums_from_api)
+            )
+            logger.info(
+                f"Artist Watch Manager: Fetched {len(all_artist_albums_from_api)} albums in total from API for artist '{artist_name}'. API reports total: {api_reported_total_albums}."
+            )

             db_album_ids = get_artist_album_ids_from_db(artist_spotify_id)
-            logger.info(f"Artist Watch Manager: Found {len(db_album_ids)} albums in DB for artist '{artist_name}'. These will be skipped if re-encountered unless logic changes.")
+            logger.info(
+                f"Artist Watch Manager: Found {len(db_album_ids)} albums in DB for artist '{artist_name}'. These will be skipped if re-encountered unless logic changes."
+            )

             queued_for_download_count = 0
             processed_album_ids_in_run = set()  # To avoid processing duplicate album_ids if API returns them across pages (should not happen with correct pagination)

             for album_data in all_artist_albums_from_api:
-                album_id = album_data.get('id')
-                album_name = album_data.get('name', 'Unknown Album')
-                album_group = album_data.get('album_group', 'N/A').lower()
-                album_type = album_data.get('album_type', 'N/A').lower()
+                album_id = album_data.get("id")
+                album_name = album_data.get("name", "Unknown Album")
+                album_group = album_data.get("album_group", "N/A").lower()
+                album_type = album_data.get("album_type", "N/A").lower()

                 if not album_id:
-                    logger.warning(f"Artist Watch Manager: Skipping album without ID for artist '{artist_name}'. Album data: {album_data}")
+                    logger.warning(
+                        f"Artist Watch Manager: Skipping album without ID for artist '{artist_name}'. Album data: {album_data}"
+                    )
                     continue

                 if album_id in processed_album_ids_in_run:
-                    logger.debug(f"Artist Watch Manager: Album '{album_name}' ({album_id}) already processed in this run. Skipping.")
+                    logger.debug(
+                        f"Artist Watch Manager: Album '{album_name}' ({album_id}) already processed in this run. Skipping."
+                    )
                     continue
                 processed_album_ids_in_run.add(album_id)

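One wrinkle the formatter cannot fix: probing `'artist_albums_page' in locals()` to guard against an unbound variable. A more conventional shape, offered here only as a suggestion and not as the project's code, is to initialize the page variable to None before the pagination loop and test it afterwards:

from typing import Any, Dict, List, Optional

def reported_total(last_page: Optional[Dict[str, Any]],
                   fetched: List[Dict[str, Any]]) -> int:
    # Initialize last_page = None before the pagination loop and assign it
    # on every iteration; then no locals() introspection is needed here.
    if last_page is not None:
        return last_page.get("total", 0)
    return len(fetched)
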
@@ -296,19 +399,31 @@ def check_watched_artists(specific_artist_id: str = None):
                 # The album_group field is generally preferred for this type of categorization as per Spotify docs.
                 is_matching_group = album_group in watched_album_groups

-                logger.debug(f"Artist '{artist_name}', Album '{album_name}' ({album_id}): album_group='{album_group}', album_type='{album_type}'. Watched groups: {watched_album_groups}. Match: {is_matching_group}.")
+                logger.debug(
+                    f"Artist '{artist_name}', Album '{album_name}' ({album_id}): album_group='{album_group}', album_type='{album_type}'. Watched groups: {watched_album_groups}. Match: {is_matching_group}."
+                )

                 if not is_matching_group:
-                    logger.debug(f"Artist Watch Manager: Skipping album '{album_name}' ({album_id}) by '{artist_name}' - group '{album_group}' not in watched list: {watched_album_groups}.")
+                    logger.debug(
+                        f"Artist Watch Manager: Skipping album '{album_name}' ({album_id}) by '{artist_name}' - group '{album_group}' not in watched list: {watched_album_groups}."
+                    )
                     continue

-                logger.info(f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' (group: {album_group}) IS a matching group.")
+                logger.info(
+                    f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' (group: {album_group}) IS a matching group."
+                )

                 if album_id not in db_album_ids:
-                    logger.info(f"Artist Watch Manager: Found NEW matching album '{album_name}' ({album_id}) by '{artist_name}'. Queuing for download.")
+                    logger.info(
+                        f"Artist Watch Manager: Found NEW matching album '{album_name}' ({album_id}) by '{artist_name}'. Queuing for download."
+                    )

-                    album_artists_list = album_data.get('artists', [])
-                    album_main_artist_name = album_artists_list[0].get('name', 'Unknown Artist') if album_artists_list else 'Unknown Artist'
+                    album_artists_list = album_data.get("artists", [])
+                    album_main_artist_name = (
+                        album_artists_list[0].get("name", "Unknown Artist")
+                        if album_artists_list
+                        else "Unknown Artist"
+                    )

                     task_payload = {
                         "download_type": "album",  # Or "track" if downloading individual tracks of album later
@@ -320,8 +435,8 @@ def check_watched_artists(specific_artist_id: str = None):
                             "artist_spotify_id": artist_spotify_id,  # Watched artist
                             "artist_name": artist_name,
                             "album_spotify_id": album_id,
-                            "album_data_for_db": album_data # Pass full API album object for DB update on completion/queuing
-                        }
+                            "album_data_for_db": album_data,  # Pass full API album object for DB update on completion/queuing
+                        },
+                    }
                     try:
                         # Add to DB first with task_id, then queue. Or queue and add task_id to DB.
@@ -332,38 +447,56 @@ def check_watched_artists(specific_artist_id: str = None):
                         # Task_id will be added if successfully queued

                         # We should call add_task first, and if it returns a task_id (not a duplicate), then update our DB.
-                        task_id_or_none = download_queue_manager.add_task(task_payload, from_watch_job=True)
+                        task_id_or_none = download_queue_manager.add_task(
+                            task_payload, from_watch_job=True
+                        )

                         if task_id_or_none:  # Task was newly queued
                             # REMOVED: add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=task_id_or_none, is_download_complete=False)
                             # The album will be added/updated in the DB by celery_tasks.py upon successful download completion.
-                            logger.info(f"Artist Watch Manager: Queued download task {task_id_or_none} for new album '{album_name}' from artist '{artist_name}'. DB entry will be created/updated on success.")
+                            logger.info(
+                                f"Artist Watch Manager: Queued download task {task_id_or_none} for new album '{album_name}' from artist '{artist_name}'. DB entry will be created/updated on success."
+                            )
                             queued_for_download_count += 1
                         # If task_id_or_none is None, it was a duplicate. Celery manager handles logging.

                     except Exception as e:
-                        logger.error(f"Artist Watch Manager: Failed to queue download for new album {album_id} ('{album_name}') from artist '{artist_name}': {e}", exc_info=True)
+                        logger.error(
+                            f"Artist Watch Manager: Failed to queue download for new album {album_id} ('{album_name}') from artist '{artist_name}': {e}",
+                            exc_info=True,
+                        )
                 else:
-                    logger.info(f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' already known in DB (ID found in db_album_ids). Skipping queue.")
+                    logger.info(
+                        f"Artist Watch Manager: Album '{album_name}' ({album_id}) by '{artist_name}' already known in DB (ID found in db_album_ids). Skipping queue."
+                    )
                     # Optionally, update its entry (e.g. last_seen, or if details changed), but for now, we only queue new ones.
                     # add_or_update_album_for_artist(artist_spotify_id, album_data, task_id=None, is_download_complete=False) # would update added_to_db_at

-            logger.info(f"Artist Watch Manager: For artist '{artist_name}', processed {len(all_artist_albums_from_api)} API albums, attempted to queue {queued_for_download_count} new albums.")
+            logger.info(
+                f"Artist Watch Manager: For artist '{artist_name}', processed {len(all_artist_albums_from_api)} API albums, attempted to queue {queued_for_download_count} new albums."
+            )

-            update_artist_metadata_after_check(artist_spotify_id, api_reported_total_albums)
-            logger.info(f"Artist Watch Manager: Finished checking artist '{artist_name}'. DB metadata updated. API reported total albums (for API filter): {api_reported_total_albums}.")
+            update_artist_metadata_after_check(
+                artist_spotify_id, api_reported_total_albums
+            )
+            logger.info(
+                f"Artist Watch Manager: Finished checking artist '{artist_name}'. DB metadata updated. API reported total albums (for API filter): {api_reported_total_albums}."
+            )

         except Exception as e:
-            logger.error(f"Artist Watch Manager: Error processing artist {artist_spotify_id} ('{artist_name}'): {e}", exc_info=True)
+            logger.error(
+                f"Artist Watch Manager: Error processing artist {artist_spotify_id} ('{artist_name}'): {e}",
+                exc_info=True,
+            )

         time.sleep(max(1, config.get("delay_between_artists_seconds", 5)))

     logger.info("Artist Watch Manager: Finished checking all watched artists.")

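The queue-then-record ordering above only works because add_task is expected to return a task id for newly queued work and None for a duplicate. A toy model of that contract (the class name and dedup key are hypothetical; Spotizerr's real, Celery-backed queue manager decides duplication itself):

import uuid
from typing import Any, Dict, Optional

class DedupDownloadQueue:
    """Toy stand-in for a duplicate-aware download queue (hypothetical)."""

    def __init__(self) -> None:
        self._queued_keys: set = set()

    def add_task(
        self, payload: Dict[str, Any], from_watch_job: bool = False
    ) -> Optional[str]:
        # Hypothetical dedup key built from the payload fields shown above.
        key = f"{payload.get('download_type')}:{payload.get('album_spotify_id')}"
        if key in self._queued_keys:
            return None  # duplicate: the caller skips DB bookkeeping
        self._queued_keys.add(key)
        return str(uuid.uuid4())  # newly queued: caller may record this id
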
 def playlist_watch_scheduler():
     """Periodically calls check_watched_playlists and check_watched_artists."""
     logger.info("Watch Scheduler: Thread started.")
     config = get_watch_config()  # Load config once at start, or reload each loop? Reload each loop for dynamic changes.

     while not STOP_EVENT.is_set():
         current_config = get_watch_config()  # Get latest config for this run
@@ -371,8 +504,12 @@ def playlist_watch_scheduler():
         watch_enabled = current_config.get("enabled", False)  # Get enabled status

         if not watch_enabled:
-            logger.info("Watch Scheduler: Watch feature is disabled in config. Skipping checks.")
-            STOP_EVENT.wait(interval) # Still respect poll interval for checking config again
+            logger.info(
+                "Watch Scheduler: Watch feature is disabled in config. Skipping checks."
+            )
+            STOP_EVENT.wait(
+                interval
+            )  # Still respect poll interval for checking config again
             continue  # Skip to next iteration

         try:
@@ -380,41 +517,61 @@ def playlist_watch_scheduler():
             check_watched_playlists()
             logger.info("Watch Scheduler: Playlist check run completed.")
         except Exception as e:
-            logger.error(f"Watch Scheduler: Unhandled exception during check_watched_playlists: {e}", exc_info=True)
+            logger.error(
+                f"Watch Scheduler: Unhandled exception during check_watched_playlists: {e}",
+                exc_info=True,
+            )

         # Add a small delay between playlist and artist checks if desired
         # time.sleep(current_config.get("delay_between_check_types_seconds", 10))
-        if STOP_EVENT.is_set(): break # Check stop event again before starting artist check
+        if STOP_EVENT.is_set():
+            break  # Check stop event again before starting artist check

         try:
             logger.info("Watch Scheduler: Starting artist check run.")
             check_watched_artists()
             logger.info("Watch Scheduler: Artist check run completed.")
         except Exception as e:
-            logger.error(f"Watch Scheduler: Unhandled exception during check_watched_artists: {e}", exc_info=True)
+            logger.error(
+                f"Watch Scheduler: Unhandled exception during check_watched_artists: {e}",
+                exc_info=True,
+            )

-        logger.info(f"Watch Scheduler: All checks complete. Next run in {interval} seconds.")
+        logger.info(
+            f"Watch Scheduler: All checks complete. Next run in {interval} seconds."
+        )
         STOP_EVENT.wait(interval)
     logger.info("Watch Scheduler: Thread stopped.")

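Note why the scheduler sleeps with STOP_EVENT.wait(interval) rather than time.sleep(interval): Event.wait returns as soon as the event is set, so shutdown is prompt instead of stalling for up to a full poll interval. A self-contained illustration with generic names, not Spotizerr's own:

import threading
import time

stop = threading.Event()

def worker() -> None:
    while not stop.is_set():
        print("tick")
        # Event.wait returns True immediately once stop is set; a bare
        # time.sleep(30) would block for the full 30 seconds regardless.
        if stop.wait(timeout=30):
            break
    print("worker stopped")

t = threading.Thread(target=worker, daemon=True)
t.start()
time.sleep(1)
stop.set()  # worker exits well before the 30-second timeout elapses
t.join(timeout=5)
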
 # --- Global thread for the scheduler ---
 _watch_scheduler_thread = None  # Renamed from _playlist_watch_thread


 def start_watch_manager():  # Renamed from start_playlist_watch_manager
     global _watch_scheduler_thread
     if _watch_scheduler_thread is None or not _watch_scheduler_thread.is_alive():
         STOP_EVENT.clear()
         # Initialize DBs on start
-        from routes.utils.watch.db import init_playlists_db, init_artists_db # Updated import
+        from routes.utils.watch.db import (
+            init_playlists_db,
+            init_artists_db,
+        )  # Updated import

         init_playlists_db()  # For playlists
         init_artists_db()  # For artists

-        _watch_scheduler_thread = threading.Thread(target=playlist_watch_scheduler, daemon=True)
+        _watch_scheduler_thread = threading.Thread(
+            target=playlist_watch_scheduler, daemon=True
+        )
         _watch_scheduler_thread.start()
-        logger.info("Watch Manager: Background scheduler started (includes playlists and artists).")
+        logger.info(
+            "Watch Manager: Background scheduler started (includes playlists and artists)."
+        )
     else:
         logger.info("Watch Manager: Background scheduler already running.")


 def stop_watch_manager():  # Renamed from stop_playlist_watch_manager
     global _watch_scheduler_thread
     if _watch_scheduler_thread and _watch_scheduler_thread.is_alive():
@@ -429,5 +586,6 @@ def stop_watch_manager():  # Renamed from stop_playlist_watch_manager
     else:
         logger.info("Watch Manager: Background scheduler not running.")


 # If this module is imported, and you want to auto-start the manager, you could call start_watch_manager() here.
 # However, it's usually better to explicitly start it from the main application/__init__.py.

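Following that comment, typical wiring from the application entry point might look like the sketch below. The import path is illustrative only; the two functions are defined in this watch-manager module, whose exact location isn't shown in this diff:

import atexit

# Illustrative import path; point it at wherever this module actually lives.
from routes.utils.watch.manager import start_watch_manager, stop_watch_manager

start_watch_manager()  # spins up the daemon scheduler thread (idempotent)
atexit.register(stop_watch_manager)  # request a clean stop at interpreter exit
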
@@ -1,4 +1,5 @@
 <?xml version="1.0" encoding="utf-8"?>
+
 <!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
 <svg width="800px" height="800px" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
 <g id="Warning / Info">
Before Size: 531 B | After Size: 527 B
@@ -6,6 +6,7 @@
 <g id="🔍-System-Icons" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
 <g id="ic_fluent_missing_metadata_24_filled" fill="#212121" fill-rule="nonzero">
 <path d="M17.5,12 C20.5376,12 23,14.4624 23,17.5 C23,20.5376 20.5376,23 17.5,23 C14.4624,23 12,20.5376 12,17.5 C12,14.4624 14.4624,12 17.5,12 Z M19.7501,2 C20.9927,2 22.0001,3.00736 22.0001,4.25 L22.0001,9.71196 C22.0001,10.50198 21.7124729,11.2623046 21.1951419,11.8530093 L21.0222,12.0361 C20.0073,11.3805 18.7981,11 17.5,11 C13.9101,11 11,13.9101 11,17.5 C11,18.7703 11.3644,19.9554 11.9943,20.9567 C10.7373,21.7569 9.05064,21.6098 7.95104,20.5143 L3.48934,16.0592 C2.21887,14.7913 2.21724,12.7334 3.48556,11.4632 L11.9852,2.95334 C12.5948,2.34297 13.4221,2 14.2847,2 L19.7501,2 Z M17.5,19.88 C17.1551,19.88 16.8755,20.1596 16.8755,20.5045 C16.8755,20.8494 17.1551,21.129 17.5,21.129 C17.8449,21.129 18.1245,20.8494 18.1245,20.5045 C18.1245,20.1596 17.8449,19.88 17.5,19.88 Z M17.5,14.0031 C16.4521,14.0031 15.6357,14.8205 15.6467,15.9574 C15.6493,16.2335 15.8753,16.4552 16.1514,16.4526 C16.4276,16.4499 16.6493,16.2239 16.6465901,15.9478 C16.6411,15.3688 17.0063,15.0031 17.5,15.0031 C17.9724,15.0031 18.3534,15.395 18.3534,15.9526 C18.3534,16.1448571 18.298151,16.2948694 18.1295283,16.5141003 L18.0355,16.63 L17.9365,16.7432 L17.6711,17.0333 C17.1868,17.5749 17,17.9255 17,18.5006 C17,18.7767 17.2239,19.0006 17.5,19.0006 C17.7762,19.0006 18,18.7767 18,18.5006 C18,18.297425 18.0585703,18.1416422 18.2388846,17.9103879 L18.3238,17.8063 L18.4247,17.6908 L18.6905,17.4003 C19.1682,16.866 19.3534,16.5186 19.3534,15.9526 C19.3534,14.8489 18.5311,14.0031 17.5,14.0031 Z M17,5.50218 C16.1716,5.50218 15.5001,6.17374 15.5001,7.00216 C15.5001,7.83057 16.1716,8.50213 17,8.50213 C17.8284,8.50213 18.5,7.83057 18.5,7.00216 C18.5,6.17374 17.8284,5.50218 17,5.50218 Z" id="🎨-Color">
+
 </path>
 </g>
 </g>
Before Size: 2.2 KiB | After Size: 2.2 KiB
@@ -1,11 +1,11 @@
 {
   "compilerOptions": {
-    "target": "ES2017", // Specify ECMAScript target version
-    "module": "ES2020", // Specify module code generation
-    "strict": true, // Enable all strict type-checking options
-    "esModuleInterop": true, // Enables emit interoperability between CommonJS and ES Modules
-    "skipLibCheck": true, // Skip type checking of declaration files
-    "forceConsistentCasingInFileNames": true, // Disallow inconsistently-cased references to the same file.
+    "target": "ES2017",
+    "module": "ES2020",
+    "strict": true,
+    "esModuleInterop": true,
+    "skipLibCheck": true,
+    "forceConsistentCasingInFileNames": true,
     "outDir": "./static/js",
     "rootDir": "./src/js"
   },
@@ -20,6 +20,6 @@
     "src/js/track.ts"
   ],
   "exclude": [
-    "node_modules" // Specifies an array of filenames or patterns that should be skipped when resolving include.
+    "node_modules"
   ]
 }