Fix inconsistent config schema

This commit is contained in:
Xoconoch
2025-08-11 09:30:28 -06:00
parent fae24489ef
commit de1e44e944
9 changed files with 356 additions and 328 deletions

View File

@@ -36,18 +36,42 @@ DEFAULT_MAIN_CONFIG = {
"realTime": False,
"customDirFormat": "%ar_album%/%album%",
"customTrackFormat": "%tracknum%. %music%",
"tracknum_padding": True,
"save_cover": True,
"tracknumPadding": True,
"saveCover": True,
"maxConcurrentDownloads": 3,
"maxRetries": 3,
"retryDelaySeconds": 5,
"retry_delay_increase": 5,
"retryDelayIncrease": 5,
"convertTo": None,
"bitrate": None,
"artist_separator": "; ",
"artistSeparator": "; ",
"recursiveQuality": False,
"watch": {},
}
def _migrate_legacy_keys(cfg: dict) -> tuple[dict, bool]:
"""Return a new config dict with legacy snake_case keys migrated to camelCase. Also normalizes nested watch if present."""
migrated = False
out = dict(cfg)
legacy_map = {
"tracknum_padding": "tracknumPadding",
"save_cover": "saveCover",
"retry_delay_increase": "retryDelayIncrease",
"artist_separator": "artistSeparator",
"recursive_quality": "recursiveQuality",
}
for legacy, camel in legacy_map.items():
if legacy in out and camel not in out:
out[camel] = out.pop(legacy)
migrated = True
# Ensure watch exists
if "watch" not in out or not isinstance(out.get("watch"), dict):
out["watch"] = {}
migrated = True
return out, migrated
def get_config_params():
"""
Get configuration parameters from the config file.
@@ -68,7 +92,10 @@ def get_config_params():
return DEFAULT_MAIN_CONFIG.copy() # Return a copy of defaults
with open(CONFIG_FILE_PATH, "r") as f:
config = json.load(f)
loaded = json.load(f) or {}
# Migrate legacy keys
config, migrated = _migrate_legacy_keys(loaded)
# Ensure all default keys are present in the loaded config
updated = False
@@ -77,9 +104,9 @@ def get_config_params():
config[key] = value
updated = True
if updated:
if updated or migrated:
logger.info(
f"Configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults."
f"Configuration at {CONFIG_FILE_PATH} updated (defaults{' and migration' if migrated else ''})."
)
with open(CONFIG_FILE_PATH, "w") as f:
json.dump(config, f, indent=4)
@@ -99,7 +126,7 @@ config_params_values = get_config_params() # Renamed to avoid conflict with mod
MAX_CONCURRENT_DL = config_params_values.get("maxConcurrentDownloads", 3)
MAX_RETRIES = config_params_values.get("maxRetries", 3)
RETRY_DELAY = config_params_values.get("retryDelaySeconds", 5)
RETRY_DELAY_INCREASE = config_params_values.get("retry_delay_increase", 5)
RETRY_DELAY_INCREASE = config_params_values.get("retryDelayIncrease", 5)
# Define task queues
task_queues = {

View File

@@ -53,15 +53,24 @@ def get_config_params():
"realTime": config.get("realTime", False),
"customDirFormat": config.get("customDirFormat", "%ar_album%/%album%"),
"customTrackFormat": config.get("customTrackFormat", "%tracknum%. %music%"),
"tracknum_padding": config.get("tracknum_padding", True),
"save_cover": config.get("save_cover", True),
"tracknumPadding": config.get(
"tracknumPadding", config.get("tracknum_padding", True)
),
"saveCover": config.get("saveCover", config.get("save_cover", True)),
"maxRetries": config.get("maxRetries", 3),
"retryDelaySeconds": config.get("retryDelaySeconds", 5),
"retry_delay_increase": config.get("retry_delay_increase", 5),
"retryDelayIncrease": config.get(
"retryDelayIncrease", config.get("retry_delay_increase", 5)
),
"convertTo": config.get("convertTo", None),
"bitrate": config.get("bitrate", None),
"artist_separator": config.get("artist_separator", "; "),
"recursive_quality": config.get("recursive_quality", False),
"artistSeparator": config.get(
"artistSeparator", config.get("artist_separator", "; ")
),
"recursiveQuality": config.get(
"recursiveQuality", config.get("recursive_quality", False)
),
"watch": config.get("watch", {}),
}
except Exception as e:
logger.error(f"Error reading config for parameters: {e}")
@@ -75,15 +84,16 @@ def get_config_params():
"realTime": False,
"customDirFormat": "%ar_album%/%album%",
"customTrackFormat": "%tracknum%. %music%",
"tracknum_padding": True,
"save_cover": True,
"tracknumPadding": True,
"saveCover": True,
"maxRetries": 3,
"retryDelaySeconds": 5,
"retry_delay_increase": 5,
"retryDelayIncrease": 5,
"convertTo": None, # Default for conversion
"bitrate": None, # Default for bitrate
"artist_separator": "; ",
"recursive_quality": False,
"artistSeparator": "; ",
"recursiveQuality": False,
"watch": {},
}
@@ -381,10 +391,10 @@ class CeleryDownloadQueueManager:
),
"pad_tracks": self._parse_bool_param(
original_request.get("tracknum_padding"),
config_params["tracknum_padding"],
config_params["tracknumPadding"],
),
"save_cover": self._parse_bool_param(
original_request.get("save_cover"), config_params["save_cover"]
original_request.get("save_cover"), config_params["saveCover"]
),
"convertTo": original_request.get(
"convertTo", config_params.get("convertTo")
@@ -393,11 +403,11 @@ class CeleryDownloadQueueManager:
"bitrate", config_params.get("bitrate")
),
"artist_separator": original_request.get(
"artist_separator", config_params.get("artist_separator", "; ")
"artist_separator", config_params.get("artistSeparator", "; ")
),
"recursive_quality": self._parse_bool_param(
original_request.get("recursive_quality"),
config_params.get("recursive_quality", False),
config_params.get("recursiveQuality", False),
),
"retry_count": 0,
"original_request": original_request,

View File

@@ -480,7 +480,7 @@ def retry_task(task_id):
config_params = get_config_params()
max_retries = config_params.get("maxRetries", 3)
initial_retry_delay = config_params.get("retryDelaySeconds", 5)
retry_delay_increase = config_params.get("retry_delay_increase", 5)
retry_delay_increase = config_params.get("retryDelayIncrease", 5)
# Check if we've exceeded max retries
if retry_count >= max_retries:
@@ -1612,16 +1612,16 @@ def download_track(self, **task_data):
config_params.get("customTrackFormat", "%tracknum%. %music%"),
)
pad_tracks = task_data.get(
"pad_tracks", config_params.get("tracknum_padding", True)
"pad_tracks", config_params.get("tracknumPadding", True)
)
save_cover = task_data.get("save_cover", config_params.get("save_cover", True))
save_cover = task_data.get("save_cover", config_params.get("saveCover", True))
convert_to = task_data.get("convertTo", config_params.get("convertTo"))
bitrate = task_data.get("bitrate", config_params.get("bitrate"))
recursive_quality = task_data.get(
"recursive_quality", config_params.get("recursive_quality", False)
"recursive_quality", config_params.get("recursiveQuality", False)
)
artist_separator = task_data.get(
"artist_separator", config_params.get("artist_separator", "; ")
"artist_separator", config_params.get("artistSeparator", "; ")
)
# Execute the download - service is now determined from URL
@@ -1707,16 +1707,16 @@ def download_album(self, **task_data):
config_params.get("customTrackFormat", "%tracknum%. %music%"),
)
pad_tracks = task_data.get(
"pad_tracks", config_params.get("tracknum_padding", True)
"pad_tracks", config_params.get("tracknumPadding", True)
)
save_cover = task_data.get("save_cover", config_params.get("save_cover", True))
save_cover = task_data.get("save_cover", config_params.get("saveCover", True))
convert_to = task_data.get("convertTo", config_params.get("convertTo"))
bitrate = task_data.get("bitrate", config_params.get("bitrate"))
recursive_quality = task_data.get(
"recursive_quality", config_params.get("recursive_quality", False)
"recursive_quality", config_params.get("recursiveQuality", False)
)
artist_separator = task_data.get(
"artist_separator", config_params.get("artist_separator", "; ")
"artist_separator", config_params.get("artistSeparator", "; ")
)
# Execute the download - service is now determined from URL
@@ -1802,16 +1802,16 @@ def download_playlist(self, **task_data):
config_params.get("customTrackFormat", "%tracknum%. %music%"),
)
pad_tracks = task_data.get(
"pad_tracks", config_params.get("tracknum_padding", True)
"pad_tracks", config_params.get("tracknumPadding", True)
)
save_cover = task_data.get("save_cover", config_params.get("save_cover", True))
save_cover = task_data.get("save_cover", config_params.get("saveCover", True))
convert_to = task_data.get("convertTo", config_params.get("convertTo"))
bitrate = task_data.get("bitrate", config_params.get("bitrate"))
recursive_quality = task_data.get(
"recursive_quality", config_params.get("recursive_quality", False)
"recursive_quality", config_params.get("recursiveQuality", False)
)
artist_separator = task_data.get(
"artist_separator", config_params.get("artist_separator", "; ")
"artist_separator", config_params.get("artistSeparator", "; ")
)
# Get retry parameters
@@ -1819,7 +1819,7 @@ def download_playlist(self, **task_data):
"initial_retry_delay", config_params.get("retryDelaySeconds", 5)
)
retry_delay_increase = task_data.get(
"retry_delay_increase", config_params.get("retry_delay_increase", 5)
"retry_delay_increase", config_params.get("retryDelayIncrease", 5)
)
max_retries = task_data.get("max_retries", config_params.get("maxRetries", 3))

View File

@@ -41,6 +41,7 @@ EXPECTED_PLAYLIST_TRACKS_COLUMNS = {
"is_present_in_spotify": "INTEGER DEFAULT 1",
"last_seen_in_spotify": "INTEGER",
"snapshot_id": "TEXT", # Track the snapshot_id when this track was added/updated
"final_path": "TEXT", # Absolute path of the downloaded file from deezspot callback
}
EXPECTED_WATCHED_ARTISTS_COLUMNS = {
@@ -883,8 +884,12 @@ def add_single_track_to_playlist_db(
if not album_artist_names:
album_artist_names = "N/A"
# Extract final_path from status_info if present (new deezspot field)
status_info = callback_data.get("status_info", {}) or {}
final_path = status_info.get("final_path")
logger.debug(
f"Extracted metadata from deezspot callback for '{track_name}': track_number={track_number}"
f"Extracted metadata from deezspot callback for '{track_name}': track_number={track_number}, final_path={final_path}"
)
except Exception as e:
@@ -921,6 +926,7 @@ def add_single_track_to_playlist_db(
1,
current_time,
snapshot_id,
final_path,
)
try:
with _get_playlists_db_connection() as conn: # Use playlists connection
@@ -929,8 +935,8 @@ def add_single_track_to_playlist_db(
cursor.execute(
f"""
INSERT OR REPLACE INTO {table_name}
(spotify_track_id, title, artist_names, album_name, album_artist_names, track_number, album_spotify_id, duration_ms, added_at_playlist, added_to_db, is_present_in_spotify, last_seen_in_spotify, snapshot_id)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
(spotify_track_id, title, artist_names, album_name, album_artist_names, track_number, album_spotify_id, duration_ms, added_at_playlist, added_to_db, is_present_in_spotify, last_seen_in_spotify, snapshot_id, final_path)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
track_data_tuple,
)

View File

@@ -2,7 +2,6 @@ import time
import threading
import logging
import json
import os
import re
from pathlib import Path
from typing import Any, List, Dict
@@ -32,69 +31,122 @@ from routes.utils.get_info import (
from routes.utils.celery_queue_manager import download_queue_manager
logger = logging.getLogger(__name__)
CONFIG_FILE_PATH = Path("./data/config/watch.json")
MAIN_CONFIG_FILE_PATH = Path("./data/config/main.json")
WATCH_OLD_FILE_PATH = Path("./data/config/watch.json")
STOP_EVENT = threading.Event()
# Format mapping for audio file conversions
AUDIO_FORMAT_EXTENSIONS = {
"mp3": ".mp3",
"flac": ".flac",
"m4a": ".m4a",
"aac": ".m4a",
"ogg": ".ogg",
"wav": ".wav",
}
DEFAULT_WATCH_CONFIG = {
"enabled": False,
"watchPollIntervalSeconds": 3600,
"max_tracks_per_run": 50, # For playlists
"watchedArtistAlbumGroup": ["album", "single"], # Default for artists
"delay_between_playlists_seconds": 2,
"delay_between_artists_seconds": 5, # Added for artists
"use_snapshot_id_checking": True, # Enable snapshot_id checking for efficiency
"maxTracksPerRun": 50,
"watchedArtistAlbumGroup": ["album", "single"],
"delayBetweenPlaylistsSeconds": 2,
"delayBetweenArtistsSeconds": 5,
"useSnapshotIdChecking": True,
}
def get_watch_config():
"""Loads the watch configuration from watch.json.
Creates the file with defaults if it doesn't exist.
Ensures all default keys are present in the loaded config.
"""Loads the watch configuration from main.json's 'watch' key (camelCase).
Applies defaults and migrates legacy snake_case keys if found.
"""
try:
# Ensure ./data/config directory exists
CONFIG_FILE_PATH.parent.mkdir(parents=True, exist_ok=True)
if not CONFIG_FILE_PATH.exists():
logger.info(
f"{CONFIG_FILE_PATH} not found. Creating with default watch config."
)
with open(CONFIG_FILE_PATH, "w") as f:
json.dump(DEFAULT_WATCH_CONFIG, f, indent=2)
MAIN_CONFIG_FILE_PATH.parent.mkdir(parents=True, exist_ok=True)
if not MAIN_CONFIG_FILE_PATH.exists():
# Create main config with default watch block
with open(MAIN_CONFIG_FILE_PATH, "w") as f:
json.dump({"watch": DEFAULT_WATCH_CONFIG}, f, indent=2)
return DEFAULT_WATCH_CONFIG.copy()
with open(CONFIG_FILE_PATH, "r") as f:
config = json.load(f)
with open(MAIN_CONFIG_FILE_PATH, "r") as f:
main_cfg = json.load(f) or {}
updated = False
for key, value in DEFAULT_WATCH_CONFIG.items():
if key not in config:
config[key] = value
updated = True
watch_cfg = main_cfg.get("watch", {}) or {}
if updated:
logger.info(
f"Watch configuration at {CONFIG_FILE_PATH} was missing some default keys. Updated with defaults."
)
with open(CONFIG_FILE_PATH, "w") as f:
json.dump(config, f, indent=2)
return config
# Detect legacy watch.json and migrate it into main.json's watch key
legacy_file_found = False
legacy_migrated_ok = False
if WATCH_OLD_FILE_PATH.exists():
try:
with open(WATCH_OLD_FILE_PATH, "r") as wf:
legacy_watch = json.load(wf) or {}
# Map legacy snake_case keys to camelCase
legacy_to_camel_watch = {
"enabled": "enabled",
"watchPollIntervalSeconds": "watchPollIntervalSeconds",
"watch_poll_interval_seconds": "watchPollIntervalSeconds",
"watchedArtistAlbumGroup": "watchedArtistAlbumGroup",
"watched_artist_album_group": "watchedArtistAlbumGroup",
"delay_between_playlists_seconds": "delayBetweenPlaylistsSeconds",
"delay_between_artists_seconds": "delayBetweenArtistsSeconds",
"use_snapshot_id_checking": "useSnapshotIdChecking",
"max_tracks_per_run": "maxTracksPerRun",
}
migrated_watch = {}
for k, v in legacy_watch.items():
target_key = legacy_to_camel_watch.get(k, k)
migrated_watch[target_key] = v
# Merge with existing watch (legacy overrides existing)
watch_cfg.update(migrated_watch)
migrated = True
legacy_file_found = True
legacy_migrated_ok = True
except Exception as le:
logger.error(
f"Failed to migrate legacy watch.json: {le}", exc_info=True
)
# Migration: map legacy keys inside watch block if present
# Keep camelCase names in memory
legacy_to_camel = {
"watch_poll_interval_seconds": "watchPollIntervalSeconds",
"watched_artist_album_group": "watchedArtistAlbumGroup",
"delay_between_playlists_seconds": "delayBetweenPlaylistsSeconds",
"delay_between_artists_seconds": "delayBetweenArtistsSeconds",
"use_snapshot_id_checking": "useSnapshotIdChecking",
"max_tracks_per_run": "maxTracksPerRun",
}
migrated = False
for legacy_key, camel_key in legacy_to_camel.items():
if legacy_key in watch_cfg and camel_key not in watch_cfg:
watch_cfg[camel_key] = watch_cfg.pop(legacy_key)
migrated = True
# Ensure defaults
for k, v in DEFAULT_WATCH_CONFIG.items():
if k not in watch_cfg:
watch_cfg[k] = v
if migrated or legacy_file_found:
# Persist migration back to main.json
main_cfg["watch"] = watch_cfg
with open(MAIN_CONFIG_FILE_PATH, "w") as f:
json.dump(main_cfg, f, indent=2)
# Rename legacy file to avoid re-migration next start
if legacy_file_found and legacy_migrated_ok:
try:
WATCH_OLD_FILE_PATH.rename(
WATCH_OLD_FILE_PATH.with_suffix(".migrated")
)
logger.info(
f"Legacy watch.json migrated and renamed to {WATCH_OLD_FILE_PATH.with_suffix('.migrated')}"
)
except Exception:
try:
WATCH_OLD_FILE_PATH.unlink()
logger.info("Legacy watch.json migrated and removed.")
except Exception:
pass
return watch_cfg
except Exception as e:
logger.error(
f"Error loading or creating watch config at {CONFIG_FILE_PATH}: {e}",
f"Error loading watch config from {MAIN_CONFIG_FILE_PATH}: {e}",
exc_info=True,
)
return DEFAULT_WATCH_CONFIG.copy() # Fallback
return DEFAULT_WATCH_CONFIG.copy()
def construct_spotify_url(item_id, item_type="track"):
@@ -267,7 +319,7 @@ def check_watched_playlists(specific_playlist_id: str = None):
f"Playlist Watch Manager: Starting check. Specific playlist: {specific_playlist_id or 'All'}"
)
config = get_watch_config()
use_snapshot_checking = config.get("use_snapshot_id_checking", True)
use_snapshot_checking = config.get("useSnapshotIdChecking", True)
if specific_playlist_id:
playlist_obj = get_watched_playlist(specific_playlist_id)
@@ -546,7 +598,7 @@ def check_watched_playlists(specific_playlist_id: str = None):
exc_info=True,
)
time.sleep(max(1, config.get("delay_between_playlists_seconds", 2)))
time.sleep(max(1, config.get("delayBetweenPlaylistsSeconds", 2)))
logger.info("Playlist Watch Manager: Finished checking all watched playlists.")
@@ -766,7 +818,7 @@ def check_watched_artists(specific_artist_id: str = None):
exc_info=True,
)
time.sleep(max(1, config.get("delay_between_artists_seconds", 5)))
time.sleep(max(1, config.get("delayBetweenArtistsSeconds", 5)))
logger.info("Artist Watch Manager: Finished checking all watched artists.")
@@ -920,7 +972,7 @@ def get_playlist_tracks_for_m3u(playlist_spotify_id: str) -> List[Dict[str, Any]
# Get all tracks that are present in Spotify
cursor.execute(f"""
SELECT spotify_track_id, title, artist_names, album_name,
album_artist_names, track_number, duration_ms
album_artist_names, track_number, duration_ms, final_path
FROM {table_name}
WHERE is_present_in_spotify = 1
ORDER BY track_number, title
@@ -938,6 +990,9 @@ def get_playlist_tracks_for_m3u(playlist_spotify_id: str) -> List[Dict[str, Any]
or "Unknown Artist",
"track_number": row["track_number"] or 0,
"duration_ms": row["duration_ms"] or 0,
"final_path": row["final_path"]
if "final_path" in row.keys()
else None,
}
)
@@ -951,136 +1006,6 @@ def get_playlist_tracks_for_m3u(playlist_spotify_id: str) -> List[Dict[str, Any]
return tracks
def generate_track_file_path(
    track: Dict[str, Any],
    custom_dir_format: str,
    custom_track_format: str,
    convert_to: str = None,
) -> str:
    """
    Generate the file path for a track based on custom format strings.

    This mimics the path generation logic used by the deezspot library.

    Args:
        track: Track metadata dictionary (title, artist_names, album_name,
            album_artist_names, track_number, duration_ms).
        custom_dir_format: Directory format string (e.g., "%ar_album%/%album%")
        custom_track_format: Track format string (e.g., "%tracknum%. %music% - %artist%")
        convert_to: Target conversion format (e.g., "mp3", "flac", "m4a");
            decides the extension when the formatted name lacks one.

    Returns:
        Generated file path relative to the output directory. On error, a
        sanitized fallback path under "Unknown Artist/Unknown Album/".
    """
    try:
        # Extract metadata. DB rows can hold an explicit None for these
        # fields, which dict.get() defaults do not cover; `or`-defaults do.
        # In particular `track_number or 0` prevents a TypeError from
        # evaluating `None > 0` below, which used to push every such track
        # onto the error-fallback path.
        artist_names = track.get("artist_names") or "Unknown Artist"
        album_name = track.get("album_name") or "Unknown Album"
        album_artist_names = track.get("album_artist_names") or "Unknown Artist"
        title = track.get("title") or "Unknown Track"
        track_number = track.get("track_number") or 0
        duration_ms = track.get("duration_ms") or 0

        # Use album artist for directory structure, main artist for track name
        main_artist = artist_names.split(", ")[0] if artist_names else "Unknown Artist"
        album_artist = (
            album_artist_names.split(", ")[0] if album_artist_names else main_artist
        )

        def clean_name(name):
            # Replace characters that are problematic in filenames and strip
            # control characters.
            name = re.sub(r'[<>:"/\\|?*]', "_", str(name))
            name = re.sub(r"[\x00-\x1f]", "", name)
            return name.strip()

        clean_album_artist = clean_name(album_artist)
        clean_album = clean_name(album_name)
        clean_main_artist = clean_name(main_artist)
        clean_title = clean_name(title)

        # Per-index artist lists for %artist_N% / %ar_album_N% placeholders.
        artist_list = [
            clean_name(a)
            for a in re.split(r"\s*,\s*", artist_names or "")
            if a.strip()
        ] or [clean_main_artist]
        album_artist_list = [
            clean_name(a)
            for a in re.split(r"\s*,\s*", album_artist_names or "")
            if a.strip()
        ] or [clean_album_artist]

        # Prepare placeholder replacements. Several tags are not available in
        # the current DB schema and resolve to empty/default strings.
        replacements = {
            "%music%": clean_title,
            "%artist%": clean_main_artist,
            "%album%": clean_album,
            "%ar_album%": clean_album_artist,
            "%tracknum%": f"{track_number:02d}" if track_number > 0 else "00",
            "%year%": "",  # Not available in current DB schema
            "%discnum%": "01",  # Default to disc 1
            "%date%": "",  # Not available
            "%genre%": "",  # Not available
            "%isrc%": "",  # Not available
            "%explicit%": "",  # Not available
            "%duration%": str(duration_ms // 1000) if duration_ms > 0 else "0",
        }

        combined_formats = custom_dir_format + custom_track_format
        artist_indices = {
            int(i) for i in re.findall(r"%artist_(\d+)%", combined_formats)
        }
        ar_album_indices = {
            int(i) for i in re.findall(r"%ar_album_(\d+)%", combined_formats)
        }

        # Indexed placeholders are 1-based; out-of-range indices fall back to
        # the first artist rather than raising.
        for i in artist_indices:
            idx = i - 1
            value = artist_list[idx] if 0 <= idx < len(artist_list) else artist_list[0]
            replacements[f"%artist_{i}%"] = value
        for i in ar_album_indices:
            idx = i - 1
            value = (
                album_artist_list[idx]
                if 0 <= idx < len(album_artist_list)
                else album_artist_list[0]
            )
            replacements[f"%ar_album_{i}%"] = value

        # Apply every replacement to both format strings in a single pass.
        dir_path = custom_dir_format
        track_filename = custom_track_format
        for placeholder, value in replacements.items():
            dir_path = dir_path.replace(placeholder, value)
            track_filename = track_filename.replace(placeholder, value)

        full_path = os.path.normpath(os.path.join(dir_path, track_filename))

        # Append an extension only if the format string did not already
        # provide a known audio extension.
        if not any(
            full_path.lower().endswith(ext)
            for ext in (".mp3", ".flac", ".m4a", ".ogg", ".wav")
        ):
            if convert_to:
                full_path += AUDIO_FORMAT_EXTENSIONS.get(convert_to.lower(), ".mp3")
            else:
                full_path += ".mp3"  # Default fallback

        return full_path

    except Exception as e:
        logger.error(
            f"Error generating file path for track {track.get('title', 'Unknown')}: {e}"
        )
        # Return a fallback path with a sanitized title and appropriate extension
        safe_title = re.sub(
            r'[<>:"/\\|?*\x00-\x1f]', "_", str(track.get("title", "Unknown Track"))
        )
        if convert_to:
            extension = AUDIO_FORMAT_EXTENSIONS.get(convert_to.lower(), ".mp3")
        else:
            extension = ".mp3"
        return f"Unknown Artist/Unknown Album/{safe_title}{extension}"
def update_playlist_m3u_file(playlist_spotify_id: str):
"""
Generate/update the m3u file for a watched playlist based on tracks in the database.
@@ -1100,13 +1025,7 @@ def update_playlist_m3u_file(playlist_spotify_id: str):
playlist_name = playlist_info.get("name", "Unknown Playlist")
# Get configuration settings
from routes.utils.celery_config import get_config_params
config = get_config_params()
custom_dir_format = config.get("customDirFormat", "%ar_album%/%album%")
custom_track_format = config.get("customTrackFormat", "%tracknum%. %music%")
convert_to = config.get("convertTo") # Get conversion format setting
output_dir = (
"./downloads" # This matches the output_dir used in download functions
)
@@ -1131,20 +1050,26 @@ def update_playlist_m3u_file(playlist_spotify_id: str):
# Generate m3u content
m3u_lines = ["#EXTM3U"]
included_count = 0
skipped_missing_final_path = 0
for track in tracks:
# Generate file path for this track
track_file_path = generate_track_file_path(
track, custom_dir_format, custom_track_format, convert_to
)
# Create relative path from m3u file location to track file
# M3U file is in ./downloads/playlists/
# Track files are in ./downloads/{custom_dir_format}/
relative_path = os.path.join("..", track_file_path)
relative_path = relative_path.replace(
"\\", "/"
) # Use forward slashes for m3u compatibility
# Use final_path from deezspot summary and convert from ./downloads to ../ relative path
final_path = track.get("final_path")
if not final_path:
skipped_missing_final_path += 1
continue
normalized = str(final_path).replace("\\", "/")
if normalized.startswith("./downloads/"):
relative_path = normalized.replace("./downloads/", "../", 1)
elif "/downloads/" in normalized.lower():
idx = normalized.lower().rfind("/downloads/")
relative_path = "../" + normalized[idx + len("/downloads/") :]
elif normalized.startswith("downloads/"):
relative_path = "../" + normalized[len("downloads/") :]
else:
# As per assumption, everything is under downloads; if not, keep as-is
relative_path = normalized
# Add EXTINF line with track duration and title
duration_seconds = (
@@ -1156,13 +1081,14 @@ def update_playlist_m3u_file(playlist_spotify_id: str):
m3u_lines.append(f"#EXTINF:{duration_seconds},{artist_and_title}")
m3u_lines.append(relative_path)
included_count += 1
# Write m3u file
with open(m3u_file_path, "w", encoding="utf-8") as f:
f.write("\n".join(m3u_lines))
logger.info(
f"Updated m3u file for playlist '{playlist_name}' at {m3u_file_path} with {len(tracks)} tracks{f' (format: {convert_to})' if convert_to else ''}."
f"Updated m3u file for playlist '{playlist_name}' at {m3u_file_path} with {included_count} entries.{f' Skipped {skipped_missing_final_path} without final_path.' if skipped_missing_final_path else ''}"
)
except Exception as e: