fixed audio conversion for real this time
debug_flac.py (deleted, 161 lines)
@@ -1,161 +0,0 @@
#!/usr/bin/env python3
"""
Specialized debugging script for investigating FLAC decryption issues.
This script downloads a track and analyzes the decryption process in detail.
"""

import os
import sys
import logging
import json
import argparse
from pathlib import Path

# Configure logging
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("flac_debug.log"),
        logging.StreamHandler()
    ]
)

logger = logging.getLogger('flac-debug')

# Import our modules
from deezspot.deezloader import DeeLogin
from deezspot.exceptions import BadCredentials, TrackNotFound
from deezspot.deezloader.__download_utils__ import analyze_flac_file

def debug_flac_decryption(arl_token, track_url, output_dir="debug_output"):
    """
    Debug the FLAC decryption process by downloading a track and analyzing each step.

    Args:
        arl_token: Deezer ARL token
        track_url: URL of the track to download
        output_dir: Directory to save output files

    Returns:
        Dict with debugging results
    """
    os.makedirs(output_dir, exist_ok=True)

    results = {
        "track_url": track_url,
        "steps": [],
        "success": False,
        "output_file": None,
        "analysis": None
    }

    try:
        # Step 1: Initialize DeeLogin
        logger.info("Step 1: Initializing DeeLogin")
        results["steps"].append({"step": "init", "status": "starting"})

        deezer = DeeLogin(arl=arl_token)
        results["steps"][-1]["status"] = "success"

        # Step 2: Download the track
        logger.info(f"Step 2: Downloading track from {track_url}")
        results["steps"].append({"step": "download", "status": "starting"})

        download_result = deezer.download_trackdee(
            track_url,
            output_dir=output_dir,
            quality_download="FLAC",
            recursive_quality=True,
            recursive_download=True
        )

        if not download_result.success:
            results["steps"][-1]["status"] = "failed"
            results["steps"][-1]["error"] = "Download failed"
            return results

        results["steps"][-1]["status"] = "success"
        results["output_file"] = download_result.song_path
        logger.info(f"Downloaded file to: {download_result.song_path}")

        # Step 3: Analyze the downloaded file
        logger.info("Step 3: Analyzing downloaded FLAC file")
        results["steps"].append({"step": "analyze", "status": "starting"})

        analysis = analyze_flac_file(download_result.song_path)
        results["analysis"] = analysis

        if analysis.get("has_flac_signature", False) and not analysis.get("potential_issues"):
            results["steps"][-1]["status"] = "success"
            results["success"] = True
            logger.info("FLAC analysis completed successfully - file appears valid")
        else:
            results["steps"][-1]["status"] = "warning"
            issues = analysis.get("potential_issues", [])
            results["steps"][-1]["issues"] = issues
            logger.warning(f"FLAC analysis found potential issues: {issues}")

        # Save detailed analysis to a JSON file
        analysis_file = os.path.join(output_dir, "flac_analysis.json")
        with open(analysis_file, 'w') as f:
            json.dump(analysis, f, indent=2)
        logger.info(f"Saved detailed analysis to {analysis_file}")

        return results

    except BadCredentials:
        logger.error("Invalid ARL token")
        results["steps"].append({"step": "error", "status": "failed", "error": "Invalid ARL token"})
        return results
    except TrackNotFound:
        logger.error(f"Track not found at URL: {track_url}")
        results["steps"].append({"step": "error", "status": "failed", "error": "Track not found"})
        return results
    except Exception as e:
        logger.error(f"Error during debugging: {str(e)}", exc_info=True)
        results["steps"].append({"step": "error", "status": "failed", "error": str(e)})
        return results

def main():
    parser = argparse.ArgumentParser(description="Debug FLAC decryption issues")
    parser.add_argument("--arl", help="Deezer ARL token")
    parser.add_argument("--track", help="Deezer track URL", default="https://www.deezer.com/us/track/2306672155")
    parser.add_argument("--output-dir", help="Output directory", default="debug_output")

    args = parser.parse_args()

    # Check for ARL token
    arl_token = args.arl or os.environ.get("DEEZER_ARL")
    if not arl_token:
        print("Error: Deezer ARL token not provided")
        print("Please provide with --arl or set the DEEZER_ARL environment variable")
        return 1

    # Run the debugging
    print(f"Starting FLAC decryption debugging for track: {args.track}")
    results = debug_flac_decryption(arl_token, args.track, args.output_dir)

    # Print summary
    print("\n===== Debugging Summary =====")
    for step in results["steps"]:
        status_icon = "✅" if step["status"] == "success" else "⚠️" if step["status"] == "warning" else "❌"
        print(f"{status_icon} {step['step'].capitalize()}: {step['status'].upper()}")

        if step["status"] == "failed" and "error" in step:
            print(f"  Error: {step['error']}")
        elif step["status"] == "warning" and "issues" in step:
            for issue in step["issues"]:
                print(f"  Issue: {issue}")

    if results["success"]:
        print("\n✅ FLAC file appears to be valid!")
        if results["output_file"]:
            print(f"Output file: {results['output_file']}")
        return 0
    else:
        print("\n❌ FLAC decryption had issues")
        return 1

if __name__ == "__main__":
    sys.exit(main())
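For reference, the script removed above exposed both a CLI and a callable helper; a minimal sketch of the programmatic path (illustrative only — the token is read from the environment and the track URL is the script's own default):

# Illustrative sketch only: exercises the debug_flac_decryption() helper deleted above.
import os
from debug_flac import debug_flac_decryption  # module removed by this commit

results = debug_flac_decryption(
    arl_token=os.environ["DEEZER_ARL"],
    track_url="https://www.deezer.com/us/track/2306672155",
    output_dir="debug_output",
)
print(results["success"], results["output_file"])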
@@ -1,315 +1,332 @@
#!/usr/bin/python3

from base64 import b64encode
from mutagen.flac import FLAC, Picture
import mutagen
from mutagen.flac import FLAC, Picture as FLACPicture
from mutagen.oggvorbis import OggVorbis
from mutagen.oggopus import OggOpus
from mutagen.mp4 import MP4, MP4Cover
from mutagen.id3 import (
    ID3NoHeaderError, ID3,
    APIC, COMM, SYLT, TALB, TCOM, TCON, TCOP, TDRC, TEXT, TIT2, TLEN,
    TPE1, TPE2, TPOS, TPUB, TRCK, TSRC, TXXX, USLT, TYER
)
from deezspot.models import Track, Episode
import requests
import logging
import os
import traceback

logger = logging.getLogger("deezspot.taggers")

def request(url):
    response = requests.get(url)
    response.raise_for_status()
    return response

from mutagen.id3 import (
    ID3NoHeaderError,
    ID3, APIC, USLT, SYLT,
    COMM, TSRC, TRCK, TIT2,
    TLEN, TEXT, TCON, TALB, TBPM,
    TPE1, TYER, TDAT, TPOS, TPE2,
    TPUB, TCOP, TXXX, TCOM, IPLS
)
# Helper to safely get image bytes
def _get_image_bytes(image_data_or_url):
    if isinstance(image_data_or_url, bytes):
        return image_data_or_url
    elif isinstance(image_data_or_url, str): # Assuming it's a URL
        try:
            response = requests.get(image_data_or_url, timeout=10)
            response.raise_for_status()
            return response.content
        except requests.RequestException as e:
            logger.warning(f"Failed to download image from URL {image_data_or_url}: {e}")
            return None
    return None

def __write_flac(song, data):
    tag = FLAC(song)
    tag.delete()
    images = Picture()
    images.type = 3
    images.mime = 'image/jpeg'
    images.data = data['image']
    tag.clear_pictures()
    tag.add_picture(images)
    tag['lyrics'] = data['lyric']
    tag['artist'] = data['artist']
    tag['title'] = data['music']
    tag['date'] = f"{data['year'].year}/{data['year'].month}/{data['year'].day}"
    tag['album'] = data['album']
    tag['tracknumber'] = f"{data['tracknum']}"
    tag['discnumber'] = f"{data['discnum']}"
    tag['genre'] = data['genre']
    tag['albumartist'] = data['ar_album']
    tag['author'] = data['author']
    tag['composer'] = data['composer']
    tag['copyright'] = data['copyright']
    tag['bpm'] = f"{data['bpm']}"
    tag['length'] = f"{int(data['duration'] * 1000)}"
    tag['organization'] = data['label']
    tag['isrc'] = data['isrc']
    tag['lyricist'] = data['lyricist']
    tag['version'] = data['version']
    tag.save()
def _format_year_for_id3(year_obj):
    if not year_obj or not hasattr(year_obj, 'year'):
        return None
    return str(year_obj.year)

def _format_date_for_vorbis(year_obj):
    if not year_obj or not hasattr(year_obj, 'strftime'):
        return None
    return year_obj.strftime('%Y-%m-%d')

def __write_mp3(song, data):
def _format_date_for_mp4(year_obj):
    if not year_obj or not hasattr(year_obj, 'year'): # MP4 ©day can be just year or full date
        return None
    # For simplicity, just using year, but full date like YYYY-MM-DD is also valid
    return str(year_obj.year)

# --- MP3 (ID3 Tags) ---
def __write_mp3(filepath, data):
    try:
        audio = ID3(song)
        audio.delete()
        tags = ID3(filepath)
    except ID3NoHeaderError:
        audio = ID3()
        tags = ID3()
    tags.delete(filepath, delete_v1=True, delete_v2=True) # Clear existing tags
    tags = ID3() # Re-initialize

    audio.add(
        APIC(
            mime = "image/jpeg",
            type = 3,
            desc = "album front cover",
            data = data['image']
        )
    )
    if data.get('music'): tags.add(TIT2(encoding=3, text=str(data['music'])))
    if data.get('artist'): tags.add(TPE1(encoding=3, text=str(data['artist'])))
    if data.get('album'): tags.add(TALB(encoding=3, text=str(data['album'])))
    if data.get('ar_album'): tags.add(TPE2(encoding=3, text=str(data['ar_album']))) # Album Artist

    audio.add(
        COMM(
            lang = "eng",
            desc = "my comment",
            text = "DO NOT USE FOR YOUR OWN EARNING"
        )
    )
    track_num_str = str(data.get('tracknum', ''))
    tracks_total_str = str(data.get('nb_tracks', ''))
    if track_num_str:
        tags.add(TRCK(encoding=3, text=f"{track_num_str}{f'/{tracks_total_str}' if tracks_total_str else ''}"))

    audio.add(
        USLT(
            text = data['lyric']
        )
    )
    disc_num_str = str(data.get('discnum', ''))
    discs_total_str = str(data.get('nb_discs', '')) # Assuming 'nb_discs' if available
    if disc_num_str:
        tags.add(TPOS(encoding=3, text=f"{disc_num_str}{f'/{discs_total_str}' if discs_total_str else ''}"))

    audio.add(
        SYLT(
            type = 1,
            format = 2,
            desc = "sync lyric song",
            text = data['lyric_sync']
        )
    )
    if data.get('genre'): tags.add(TCON(encoding=3, text=str(data['genre'])))

    audio.add(
        TSRC(
            text = data['isrc']
        )
    )
    year_str = _format_year_for_id3(data.get('year'))
    if year_str: tags.add(TYER(encoding=3, text=year_str))

    audio.add(
        TRCK(
            text = f"{data['tracknum']}/{data['nb_tracks']}"
        )
    )
    comment_text = data.get('comment', 'Downloaded by DeezSpot')
    tags.add(COMM(encoding=3, lang='eng', desc='', text=comment_text))

    audio.add(
        TIT2(
            text = data['music']
        )
    )
    if data.get('composer'): tags.add(TCOM(encoding=3, text=str(data['composer'])))
    if data.get('copyright'): tags.add(TCOP(encoding=3, text=str(data['copyright'])))
    if data.get('label'): tags.add(TPUB(encoding=3, text=str(data['label']))) # Publisher/Label
    if data.get('isrc'): tags.add(TSRC(encoding=3, text=str(data['isrc'])))

    audio.add(
        TLEN(
            text = f"{data['duration']}"
        )
    )
    duration_sec = data.get('duration')
    if isinstance(duration_sec, (int, float)) and duration_sec > 0:
        tags.add(TLEN(encoding=3, text=str(int(duration_sec * 1000))))

    audio.add(
        TEXT(
            text = data['lyricist']
        )
    )
    if data.get('lyric'): tags.add(USLT(encoding=3, lang='eng', desc='', text=str(data['lyric'])))
    # SYLT for synced lyrics would need specific format for its text field

    audio.add(
        TCON(
            text = data['genre']
        )
    )
    img_bytes = _get_image_bytes(data.get('image'))
    if img_bytes:
        tags.add(APIC(encoding=3, mime='image/jpeg', type=3, desc='Cover', data=img_bytes))

    audio.add(
        TALB(
            text = data['album']
        )
    )
    if data.get('bpm') and str(data.get('bpm', '')).isdigit():
        tags.add(TXXX(encoding=3, desc='BPM', text=str(data['bpm'])))
    if data.get('author'): # Lyricist
        tags.add(TXXX(encoding=3, desc='LYRICIST', text=str(data['author'])))

    audio.add(
        TBPM(
            text = f"{data['bpm']}"
        )
    )
    tags.save(filepath, v2_version=3)

    audio.add(
        TPE1(
            text = data['artist']
        )
    )
# --- M4A (AAC/ALAC in MP4 Container) ---
def __write_m4a(filepath, data):
    try:
        mp4 = MP4(filepath)
        tags = mp4.tags
    except Exception as e:
        logger.warning(f"Could not open M4A file {filepath} for tagging, trying to create new: {e}")
        try:
            mp4 = MP4() # Create a new MP4 object if loading fails
            tags = mp4.tags # Get its tags attribute (will be empty or None)
        except Exception as e_create:
            logger.error(f"Failed to initialize MP4 tags for {filepath}: {e_create}")
            return

    audio.add(
        TYER(
            text = f"{data['year'].year}"
        )
    )
    # Atom names (ensure they are bytes for mutagen for older versions, strings for newer)
    # Mutagen generally handles this; use strings for keys for clarity.
    TAG_MAP = {
        'music': '\xa9nam', 'artist': '\xa9ART', 'album': '\xa9alb', 'ar_album': 'aART',
        'genre': '\xa9gen', 'composer': '\xa9wrt', 'copyright': 'cprt',
        'comment': '\xa9cmt', 'label': '\xa9pub' # Using a common atom for publisher
    }

    audio.add(
        TDAT(
            text = f"{data['year'].day}{data['year'].month}"
        )
    )
    for data_key, atom_key in TAG_MAP.items():
        if data.get(data_key) is not None:
            tags[atom_key] = [str(data[data_key])]
        else:
            if atom_key in tags: del tags[atom_key]

    audio.add(
        TPOS(
            text = f"{data['discnum']}/{data['discnum']}"
        )
    )
    mp4_date = _format_date_for_mp4(data.get('year'))
    if mp4_date: tags['\xa9day'] = [mp4_date]
    else:
        if '\xa9day' in tags: del tags['\xa9day']

    audio.add(
        TPE2(
            text = data['ar_album']
        )
    )
    track_num = data.get('tracknum')
    tracks_total = data.get('nb_tracks', 0)
    if track_num is not None:
        tags['trkn'] = [[int(track_num), int(tracks_total)]]
    else:
        if 'trkn' in tags: del tags['trkn']

    audio.add(
        TPUB(
            text = data['label']
        )
    )
    disc_num = data.get('discnum')
    discs_total = data.get('nb_discs', 0) # Assuming 'nb_discs' if available
    if disc_num is not None:
        tags['disk'] = [[int(disc_num), int(discs_total)]]
    else:
        if 'disk' in tags: del tags['disk']

    audio.add(
        TCOP(
            text = data['copyright']
        )
    )
    if data.get('bpm') and str(data.get('bpm','')).isdigit():
        tags['tmpo'] = [int(data['bpm'])]
    elif 'tmpo' in tags: del tags['tmpo']

    audio.add(
        TXXX(
            desc = "REPLAYGAIN_TRACK_GAIN",
            text = f"{data['gain']}"
        )
    )
    if data.get('lyric'):
        tags['\xa9lyr'] = [str(data['lyric'])]
    elif '\xa9lyr' in tags: del tags['\xa9lyr']

    audio.add(
        TCOM(
            text = data['composer']
        )
    )
    img_bytes = _get_image_bytes(data.get('image'))
    if img_bytes:
        img_format = MP4Cover.FORMAT_JPEG if img_bytes.startswith(b'\xff\xd8') else MP4Cover.FORMAT_PNG
        tags['covr'] = [MP4Cover(img_bytes, imageformat=img_format)]
    elif 'covr' in tags: del tags['covr']

    audio.add(
        IPLS(
            people = [
                data['author']
            ]
        )
    )
    # For ISRC - often stored in a custom way
    if data.get('isrc'):
        tags['----:com.apple.iTunes:ISRC'] = bytes(str(data['isrc']), 'utf-8')
    elif '----:com.apple.iTunes:ISRC' in tags: del tags['----:com.apple.iTunes:ISRC']

    audio.save(song, v2_version = 3)
    try:
        mp4.save(filepath) # Use the MP4 object's save method
    except Exception as e:
        logger.error(f"Failed to save M4A tags for {filepath}: {e}")

def __write_ogg(song, song_metadata):
    audio = OggVorbis(song)
    audio.delete()
# --- Vorbis Comments (FLAC, OGG, OPUS) ---
def __write_vorbis(filepath, data, audio_format_class):
    try:
        tags = audio_format_class(filepath)
    except Exception as e:
        logger.warning(f"Could not open {filepath} for Vorbis tagging ({audio_format_class.__name__}), creating new tags: {e}")
        try:
            instance = audio_format_class()
            instance.save(filepath)
            tags = audio_format_class(filepath)
        except Exception as e_create:
            logger.error(f"Failed to create/load {filepath} for Vorbis tagging: {e_create}")
            return

    # Standard Vorbis comment fields mapping
    field_mapping = {
        'music': 'title',
        'artist': 'artist',
        'album': 'album',
        'tracknum': 'tracknumber',
        'discnum': 'discnumber',
        'year': 'date',
        'genre': 'genre',
        'isrc': 'isrc',
        'description': 'description',
        'ar_album': 'albumartist',
        'composer': 'composer',
        'copyright': 'copyright',
        'bpm': 'bpm',
        'lyricist': 'lyricist',
        'version': 'version'
    }
    tags.delete() # Clear existing tags before adding new ones

    # Add standard text metadata
    for source_key, vorbis_key in field_mapping.items():
        if source_key in song_metadata:
            value = song_metadata[source_key]
    VORBIS_MAP = {
        'music': 'TITLE', 'artist': 'ARTIST', 'album': 'ALBUM', 'ar_album': 'ALBUMARTIST',
        'genre': 'GENRE', 'composer': 'COMPOSER', 'copyright': 'COPYRIGHT',
        'label': 'ORGANIZATION', 'isrc': 'ISRC', 'comment': 'COMMENT',
        'lyric': 'LYRICS', 'author': 'LYRICIST', 'version': 'VERSION'
    }

            # Special handling for date field
            if vorbis_key == 'date':
                # Convert datetime object to YYYY-MM-DD string format
                if hasattr(value, 'strftime'):
                    value = value.strftime('%Y-%m-%d')
                # Handle string timestamps if necessary
                elif isinstance(value, str) and ' ' in value:
                    value = value.split()[0]
    for data_key, vorbis_key in VORBIS_MAP.items():
        if data.get(data_key) is not None: tags[vorbis_key] = str(data[data_key])

            # Skip "Unknown" BPM values or other non-numeric BPM values
            if vorbis_key == 'bpm' and (value == "Unknown" or not isinstance(value, (int, float)) and not str(value).isdigit()):
                continue
    vorbis_date = _format_date_for_vorbis(data.get('year'))
    if vorbis_date: tags['DATE'] = vorbis_date

            audio[vorbis_key] = [str(value)]
    if data.get('tracknum') is not None: tags['TRACKNUMBER'] = str(data['tracknum'])
    if data.get('nb_tracks') is not None: tags['TRACKTOTAL'] = str(data['nb_tracks'])
    if data.get('discnum') is not None: tags['DISCNUMBER'] = str(data['discnum'])
    if data.get('nb_discs') is not None: tags['DISCTOTAL'] = str(data['nb_discs'])

    # Add lyrics if present
    if 'lyric' in song_metadata:
        audio['lyrics'] = [str(song_metadata['lyric'])]
    if data.get('bpm') and str(data.get('bpm','')).isdigit():
        tags['BPM'] = str(data['bpm'])

    # Handle cover art
    if 'image' in song_metadata:
        try:
            image = Picture()
            image.type = 3 # Front cover
            image.mime = 'image/jpeg'
            image.desc = 'Cover'
    duration_sec = data.get('duration')
    if isinstance(duration_sec, (int, float)) and duration_sec > 0:
        tags['LENGTH'] = str(duration_sec) # Store as seconds string

            if isinstance(song_metadata['image'], bytes):
                image.data = song_metadata['image']
            else:
                image.data = request(song_metadata['image']).content
    img_bytes = _get_image_bytes(data.get('image'))
    if img_bytes:
        if audio_format_class == FLAC:
            pic = FLACPicture()
            pic.type = 3
            pic.mime = 'image/jpeg' if img_bytes.startswith(b'\xff\xd8') else 'image/png'
            pic.data = img_bytes
            tags.clear_pictures()
            tags.add_picture(pic)
        elif audio_format_class in [OggVorbis, OggOpus]:
            try:
                # For OGG/Opus, METADATA_BLOCK_PICTURE is a base64 encoded FLAC Picture block
                pic_for_ogg = FLACPicture() # Use FLACPicture structure
                pic_for_ogg.type = 3
                pic_for_ogg.mime = 'image/jpeg' if img_bytes.startswith(b'\xff\xd8') else 'image/png'
                pic_for_ogg.data = img_bytes
                tags['METADATA_BLOCK_PICTURE'] = [b64encode(pic_for_ogg.write()).decode('ascii')]
            except Exception as e_ogg_pic:
                logger.warning(f"Could not prepare/embed cover art for OGG/Opus in {filepath}: {e_ogg_pic}")
    try:
        tags.save()
    except Exception as e:
        logger.error(f"Failed to save Vorbis tags for {filepath} ({audio_format_class.__name__}): {e}")

            # Encode using base64 as required by Vorbis spec
            audio['metadata_block_picture'] = [
                b64encode(image.write()).decode('utf-8')
            ]
        except Exception as e:
            print(f"Error adding cover art: {e}")
# --- WAV (ID3 Tags) ---
def __write_wav(filepath, data):
    # WAV files can store ID3 tags. This is more versatile than RIFF INFO.
    __write_mp3(filepath, data) # Reuse MP3/ID3 logic

    # Additional validation for numeric fields - exclude BPM since we already handled it
    numeric_fields = ['tracknumber', 'discnumber']
    for field in numeric_fields:
        if field in audio:
            try:
                int(audio[field][0])
            except ValueError:
                print(f"Warning: Invalid numeric value for {field}")
                del audio[field]

    audio.save()

# --- Main Dispatcher ---
def write_tags(media):
    if isinstance(media, Track):
        song = media.song_path
    elif isinstance(media, Episode):
        song = media.episode_path
    else:
        raise ValueError("Unsupported media type")
    if isinstance(media, Track):
        filepath = media.song_path
    elif isinstance(media, Episode):
        filepath = getattr(media, 'episode_path', getattr(media, 'song_path', None)) # Episode model might vary
    else:
        logger.error(f"Unsupported media type for tagging: {type(media)}")
        return

    song_metadata = media.tags
    f_format = media.file_format
    if not filepath:
        logger.error(f"Filepath is missing for tagging media object: {media}")
        return

    if f_format == ".flac":
        __write_flac(song, song_metadata)
    elif f_format == ".ogg":
        __write_ogg(song, song_metadata)
    else:
        __write_mp3(song, song_metadata)
    song_metadata = getattr(media, 'tags', None)
    if not song_metadata:
        logger.warning(f"No metadata (tags) found for {filepath}. Skipping tagging.")
        return

    file_ext = getattr(media, 'file_format', None)
    if not file_ext:
        logger.warning(f"File format not specified in media object for {filepath}. Attempting to guess from filepath.")
        _, file_ext = os.path.splitext(filepath)
        if not file_ext:
            logger.error(f"Could not determine file format for {filepath}. Skipping tagging.")
            return

    file_ext = file_ext.lower()
    logger.info(f"Writing tags for: {filepath} (Format: {file_ext})")

    try:
        if file_ext == ".mp3":
            __write_mp3(filepath, song_metadata)
        elif file_ext == ".flac":
            __write_vorbis(filepath, song_metadata, FLAC)
        elif file_ext == ".ogg":
            __write_vorbis(filepath, song_metadata, OggVorbis)
        elif file_ext == ".opus":
            __write_vorbis(filepath, song_metadata, OggOpus)
        elif file_ext == ".m4a": # Handles AAC and ALAC
            __write_m4a(filepath, song_metadata)
        elif file_ext == ".wav":
            __write_wav(filepath, song_metadata)
        else:
            logger.warning(f"Unsupported file format for tagging: {file_ext} for file {filepath}")
    except Exception as e:
        logger.error(f"General error during tagging for {filepath}: {e}")
        logger.debug(traceback.format_exc())

# Placeholder - purpose seems to be for checking if tags were written correctly or file integrity.
# Actual implementation would depend on specific needs.
def check_track(media):
    if isinstance(media, Track):
        song = media.song_path
    elif isinstance(media, Episode):
        song = media.episode_path
    else:
        raise ValueError("Unsupported media type")
    if isinstance(media, Track):
        filepath = media.song_path
    elif isinstance(media, Episode):
        filepath = getattr(media, 'episode_path', getattr(media, 'song_path', None))
    else:
        logger.warning(f"check_track called with unsupported media type: {type(media)}")
        return False

    f_format = media.file_format
    is_ok = False
    if not filepath or not os.path.exists(filepath):
        logger.warning(f"check_track: Filepath missing or file does not exist: {filepath}")
        return False

    # Add your logic to check the track/episode here

    return is_ok
    try:
        audio = mutagen.File(filepath, easy=True) # Try loading with easy tags
        if audio is None or not audio.tags:
            logger.info(f"check_track: No tags found or file not recognized by mutagen for {filepath}")
            return False
        # Add more specific checks here if needed, e.g., check for a title tag
        if audio.get('title') or audio.get('TIT2') or audio.get('\xa9nam'):
            logger.info(f"check_track: Basic tags appear to be present for {filepath}")
            return True
        else:
            logger.info(f"check_track: Essential tags (like title) seem to be missing in {filepath}")
            return False
    except Exception as e:
        logger.error(f"check_track: Error loading file {filepath} with mutagen: {e}")
        return False
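A rough sketch of how the reworked write_tags() dispatcher above is expected to be driven; the metadata keys mirror the ones read by the writers, the Track constructor arguments follow the order used later in this commit, and the import path of the taggers module is an assumption:

# Illustrative sketch only; values are placeholders.
from deezspot.models import Track
from deezspot.libutils.taggers import write_tags  # import path assumed

meta = {
    "music": "Song Title",
    "artist": "Artist Name",
    "album": "Album Name",
    "tracknum": 1,
    "nb_tracks": 12,
    "image": b"",          # raw JPEG/PNG bytes or a cover URL
}
track = Track(meta, "/tmp/song.flac", ".flac", "FLAC", "https://www.deezer.com/track/1", 1)
write_tags(track)          # routes to __write_vorbis(..., FLAC) based on file_format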
@@ -5,7 +5,7 @@ import requests
import time
from os.path import isfile
from copy import deepcopy
from deezspot.libutils.audio_converter import convert_audio, parse_format_string
from deezspot.libutils.audio_converter import convert_audio
from deezspot.deezloader.dee_api import API
from deezspot.deezloader.deegw_api import API_GW
from deezspot.deezloader.deezer_settings import qualities
@@ -43,6 +43,9 @@ from mutagen.id3 import ID3
from mutagen.mp4 import MP4
from mutagen import File
from deezspot.libutils.logging_utils import logger, ProgressReporter
from deezspot.libutils.skip_detection import check_track_exists
from deezspot.libutils.cleanup_utils import register_active_download, unregister_active_download
from deezspot.libutils.audio_converter import AUDIO_FORMATS # Added for parse_format_string

class Download_JOB:
    progress_reporter = None
@@ -200,6 +203,7 @@ class EASY_DW:
        self.__recursive_quality = preferences.recursive_quality
        self.__recursive_download = preferences.recursive_download
        self.__convert_to = getattr(preferences, 'convert_to', None)
        self.__bitrate = getattr(preferences, 'bitrate', None) # Added for consistency

        if self.__infos_dw.get('__TYPE__') == 'episode':
@@ -226,45 +230,6 @@ class EASY_DW:
        self.__set_quality()
        self.__write_track()

    def __track_already_exists(self, title, album):
        # Ensure the song path is set; if not, compute it.
        if not hasattr(self, '_EASY_DW__song_path') or not self.__song_path:
            self.__set_song_path()

        # Get only the final directory where the track will be saved.
        final_dir = os.path.dirname(self.__song_path)
        if not os.path.exists(final_dir):
            return False

        # List files only in the final directory.
        for file in os.listdir(final_dir):
            file_path = os.path.join(final_dir, file)
            lower_file = file.lower()
            try:
                existing_title = None
                existing_album = None
                if lower_file.endswith('.flac'):
                    audio = FLAC(file_path)
                    existing_title = audio.get('title', [None])[0]
                    existing_album = audio.get('album', [None])[0]
                elif lower_file.endswith('.mp3'):
                    audio = MP3(file_path, ID3=ID3)
                    existing_title = audio.get('TIT2', [None])[0]
                    existing_album = audio.get('TALB', [None])[0]
                elif lower_file.endswith('.m4a'):
                    audio = MP4(file_path)
                    existing_title = audio.get('\xa9nam', [None])[0]
                    existing_album = audio.get('\xa9alb', [None])[0]
                elif lower_file.endswith(('.ogg', '.wav')):
                    audio = File(file_path)
                    existing_title = audio.get('title', [None])[0]
                    existing_album = audio.get('album', [None])[0]
                if existing_title == title and existing_album == album:
                    return True
            except Exception:
                continue
        return False

    def __set_quality(self) -> None:
        self.__file_format = self.__c_quality['f_format']
        self.__song_quality = self.__c_quality['s_quality']
@@ -334,16 +299,40 @@ class EASY_DW:
        # Check if track already exists based on metadata
        current_title = self.__song_metadata['music']
        current_album = self.__song_metadata['album']
        if self.__track_already_exists(current_title, current_album):
            # Create skipped progress report using the new required format
        current_artist = self.__song_metadata.get('artist') # For logging

        # Use check_track_exists from skip_detection module
        # self.__song_path is the original path before any conversion logic in this download attempt.
        # self.__convert_to is the user's desired final format.
        exists, existing_file_path = check_track_exists(
            original_song_path=self.__song_path,
            title=current_title,
            album=current_album,
            convert_to=self.__convert_to, # User's target conversion format
            logger=logger
        )

        if exists and existing_file_path:
            logger.info(f"Track '{current_title}' by '{current_artist}' already exists at '{existing_file_path}'. Skipping download.")

            self.__c_track.song_path = existing_file_path
            _, new_ext = os.path.splitext(existing_file_path)
            self.__c_track.file_format = new_ext.lower()
            # self.__c_track.song_quality might need re-evaluation if we could determine
            # quality of existing file. For now, assume it's acceptable.

            self.__c_track.success = True
            self.__c_track.was_skipped = True

            progress_data = {
                "type": "track",
                "song": current_title,
                "artist": self.__song_metadata['artist'],
                "status": "skipped",
                "url": self.__link,
                "reason": "Track already exists",
                "convert_to": self.__convert_to
                "reason": f"Track already exists in desired format at {existing_file_path}",
                "convert_to": self.__convert_to,
                "bitrate": self.__bitrate
            }

            # Add parent info based on parent type
@@ -389,15 +378,15 @@ class EASY_DW:
            # Create a minimal track object for skipped scenario
            skipped_item = Track(
                self.__song_metadata,
                self.__song_path, # song_path would be set if __write_track was called
                self.__file_format, self.__song_quality,
                existing_file_path, # Use the path of the existing file
                self.__c_track.file_format, # Use updated file format
                self.__song_quality, # Original download quality target
                self.__link, self.__ids
            )
            skipped_item.success = False
            skipped_item.success = True # Considered successful as file is available
            skipped_item.was_skipped = True
            # It's important that this skipped_item is what's checked later, or self.__c_track is updated
            self.__c_track = skipped_item # Ensure self.__c_track reflects this skipped state
            return self.__c_track # Return the correctly flagged skipped track
            self.__c_track = skipped_item
            return self.__c_track

        # Initialize success to False for the item being processed
        if self.__infos_dw.get('__TYPE__') == 'episode':
@@ -626,9 +615,22 @@ class EASY_DW:
            Download_JOB.report_progress(progress_data)

            # Start of processing block (decryption, tagging, cover, conversion)
            # Decrypt the file using the utility function
            decryptfile(c_crypted_audio, self.__fallback_ids, self.__song_path)
            logger.debug(f"Successfully decrypted track using {encryption_type} encryption")
            register_active_download(self.__song_path)
            try:
                # Decrypt the file using the utility function
                decryptfile(c_crypted_audio, self.__fallback_ids, self.__song_path)
                logger.debug(f"Successfully decrypted track using {encryption_type} encryption")
                # self.__song_path is still registered
            except Exception as e_decrypt:
                unregister_active_download(self.__song_path)
                if isfile(self.__song_path):
                    try:
                        os.remove(self.__song_path)
                    except OSError: # Handle potential errors during removal
                        logger.warning(f"Could not remove partially downloaded file: {self.__song_path}")
                self.__c_track.success = False
                self.__c_track.error_message = f"Decryption failed: {str(e_decrypt)}"
                raise TrackNotFound(f"Failed to process {self.__song_path}. Error: {str(e_decrypt)}") from e_decrypt

            self.__add_more_tags() # self.__song_metadata is updated here
            self.__c_track.tags = self.__song_metadata # IMPORTANT: Update track object's tags
@@ -644,31 +646,55 @@ class EASY_DW:

            # Apply audio conversion if requested
            if self.__convert_to:
                format_name, bitrate = parse_format_string(self.__convert_to)
                format_name, bitrate = self._parse_format_string(self.__convert_to)
                if format_name:
                    from deezspot.deezloader.__download__ import register_active_download, unregister_active_download # Ensure these are available or handle differently
                    # Current self.__song_path (original decrypted file) is registered.
                    # convert_audio will handle unregistering it if it creates a new file,
                    # and will register the new file.
                    path_before_conversion = self.__song_path
                    try:
                        converted_path = convert_audio(
                            self.__song_path,
                            path_before_conversion,
                            format_name,
                            bitrate,
                            bitrate if bitrate else self.__bitrate, # Prefer specific bitrate from string, fallback to general
                            register_active_download,
                            unregister_active_download
                        )
                        if converted_path != self.__song_path:
                        if converted_path != path_before_conversion:
                            # convert_audio has unregistered path_before_conversion (if it existed and was different)
                            # and registered converted_path.
                            self.__song_path = converted_path
                            self.__c_track.song_path = converted_path
                            _, new_ext = os.path.splitext(converted_path)
                            self.__file_format = new_ext.lower() # Update internal state
                            self.__c_track.file_format = new_ext.lower()
                        # self.__song_path (the converted_path) is now the registered active download
                        # If converted_path == path_before_conversion, no actual file change, registration status managed by convert_audio
                    except Exception as conv_error:
                        logger.error(f"Audio conversion error: {str(conv_error)}")
                        # Decide if this is a fatal error for the track or if we proceed with original
                        logger.error(f"Audio conversion error: {str(conv_error)}. Proceeding with original format.")
                        # path_before_conversion should still be registered if convert_audio failed early
                        # or did not successfully unregister it.
                        # If conversion fails, the original file (path_before_conversion) remains the target.
                        # Its registration state should be preserved if convert_audio didn't affect it.
                        # For safety, ensure it is considered the active download if conversion fails:
                        register_active_download(path_before_conversion)


            # Write tags to the final file (original or converted)
            write_tags(self.__c_track)
            self.__c_track.success = True # Mark as successful only after all steps including tags
            unregister_active_download(self.__song_path) # Unregister the final successful file

        except Exception as e: # Handles errors from __write_track, decrypt, add_tags, save_cover, convert, write_tags
            # Ensure unregister is called for self.__song_path if it was registered and an error occurred
            # The specific error might have already unregistered it (e.g. decrypt error)
            # Call it defensively.
            unregister_active_download(self.__song_path)
            if isfile(self.__song_path):
                os.remove(self.__song_path)
                try:
                    os.remove(self.__song_path)
                except OSError:
                    logger.warning(f"Could not remove file on error: {self.__song_path}")

            error_msg = str(e)
            if "Data must be padded" in error_msg: error_msg = "Decryption error (padding issue) - Try a different quality setting or download format"
@@ -707,6 +733,8 @@ class EASY_DW:
            error_message = f"Download failed for '{song_title}' by '{artist_name}' (Link: {self.__link}). Error: {str(e)}"
            logger.error(error_message)
            # Store error on track object if possible
            # Ensure self.__song_path is unregistered if an error occurs before successful completion.
            unregister_active_download(self.__song_path)
            if hasattr(self, '_EASY_DW__c_track') and self.__c_track:
                self.__c_track.success = False
                self.__c_track.error_message = str(e)
@@ -720,40 +748,60 @@ class EASY_DW:

            os.makedirs(os.path.dirname(self.__song_path), exist_ok=True)

            response = requests.get(direct_url, stream=True)
            response.raise_for_status()
            register_active_download(self.__song_path)
            try:
                response = requests.get(direct_url, stream=True)
                response.raise_for_status()

                content_length = response.headers.get('content-length')
                total_size = int(content_length) if content_length else None
                content_length = response.headers.get('content-length')
                total_size = int(content_length) if content_length else None

                downloaded = 0
                with open(self.__song_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:
                            size = f.write(chunk)
                            downloaded += size
                downloaded = 0
                with open(self.__song_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:
                            size = f.write(chunk)
                            downloaded += size

                            # Download progress reporting could be added here
                            # Download progress reporting could be added here

                # Build episode progress report
                progress_data = {
                    "type": "episode",
                    "song": self.__song_metadata.get('music', 'Unknown Episode'),
                    "artist": self.__song_metadata.get('artist', 'Unknown Show'),
                    "status": "done"
                }
                # If download successful, unregister the initially downloaded file before potential conversion
                unregister_active_download(self.__song_path)

                # Use Spotify URL if available (for downloadspo functions), otherwise use Deezer link
                spotify_url = getattr(self.__preferences, 'spotify_url', None)
                progress_data["url"] = spotify_url if spotify_url else self.__link

                Download_JOB.report_progress(progress_data)
                # Build episode progress report
                progress_data = {
                    "type": "episode",
                    "song": self.__song_metadata.get('music', 'Unknown Episode'),
                    "artist": self.__song_metadata.get('artist', 'Unknown Show'),
                    "status": "done"
                }

                self.__c_track.success = True
                self.__write_episode()
                write_tags(self.__c_track)
                # Use Spotify URL if available (for downloadspo functions), otherwise use Deezer link
                spotify_url = getattr(self.__preferences, 'spotify_url', None)
                progress_data["url"] = spotify_url if spotify_url else self.__link

                return self.__c_track
                Download_JOB.report_progress(progress_data)

                self.__c_track.success = True
                self.__write_episode()
                write_tags(self.__c_track)

                return self.__c_track

            except Exception as e_dw_ep: # Catches errors from requests.get, file writing
                unregister_active_download(self.__song_path) # Unregister if download part failed
                if isfile(self.__song_path):
                    try:
                        os.remove(self.__song_path)
                    except OSError:
                        logger.warning(f"Could not remove episode file on error: {self.__song_path}")
                self.__c_track.success = False # Mark as failed
                episode_title = self.__preferences.song_metadata.get('music', 'Unknown Episode')
                err_msg = f"Episode download failed for '{episode_title}' (URL: {self.__link}). Error: {str(e_dw_ep)}"
                logger.error(err_msg)
                self.__c_track.error_message = str(e_dw_ep)
                raise TrackNotFound(message=err_msg, url=self.__link) from e_dw_ep

        except Exception as e:
            if isfile(self.__song_path):
@@ -766,6 +814,35 @@ class EASY_DW:
            self.__c_track.error_message = str(e)
            raise TrackNotFound(message=err_msg, url=self.__link) from e

    def _parse_format_string(self, format_str: str) -> tuple[str | None, str | None]:
        """Helper to parse format string like 'MP3_320K' into format and bitrate."""
        if not format_str:
            return None, None

        parts = format_str.upper().split('_', 1)
        format_name = parts[0]
        bitrate = parts[1] if len(parts) > 1 else None

        if format_name not in AUDIO_FORMATS:
            logger.warning(f"Unsupported format {format_name} in format string '{format_str}'. Will not convert.")
            return None, None

        if bitrate:
            # Ensure bitrate ends with 'K' for consistency if it's a number followed by K
            if bitrate[:-1].isdigit() and not bitrate.endswith('K'):
                bitrate += 'K'

            valid_bitrates = AUDIO_FORMATS[format_name].get("bitrates", [])
            if valid_bitrates and bitrate not in valid_bitrates:
                default_br = AUDIO_FORMATS[format_name].get("default_bitrate")
                logger.warning(f"Unsupported bitrate {bitrate} for {format_name}. Using default {default_br if default_br else 'as available'}.")
                bitrate = default_br # Fallback to default, or None if no specific default for lossless
            elif not valid_bitrates and AUDIO_FORMATS[format_name].get("default_bitrate") is None: # Lossless format
                logger.info(f"Bitrate {bitrate} specified for lossless format {format_name}. Bitrate will be ignored by converter.")
                # Keep bitrate as is, convert_audio will handle ignoring it for lossless.

        return format_name, bitrate

    def __add_more_tags(self) -> None:
        contributors = self.__infos_dw.get('SNG_CONTRIBUTORS', {})

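For orientation, the new _parse_format_string helper above is expected to behave roughly as follows (illustrative calls; `dw` stands for an EASY_DW instance and the results assume the AUDIO_FORMATS table updated later in this commit):

# Illustrative expectations only; not part of the diff.
fmt, br = dw._parse_format_string("FLAC")      # ("FLAC", None)  - lossless, no bitrate needed
fmt, br = dw._parse_format_string("MP3_320")   # ("MP3", "320k") - bare numbers get a 'K', then are validated against AUDIO_FORMATS
fmt, br = dw._parse_format_string("XYZ_128K")  # (None, None)    - unknown format, conversion is skipped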
@@ -104,6 +104,7 @@ class DeeLogin:
        retry_delay_increase=30,
        max_retries=5,
        convert_to=None,
        bitrate=None,
        save_cover=stock_save_cover
    ) -> Track:

@@ -141,6 +142,7 @@ class DeeLogin:
        preferences.max_retries = max_retries
        # Audio conversion parameter
        preferences.convert_to = convert_to
        preferences.bitrate = bitrate
        preferences.save_cover = save_cover

        track = DW_TRACK(preferences).dw()
@@ -162,6 +164,7 @@ class DeeLogin:
        retry_delay_increase=30,
        max_retries=5,
        convert_to=None,
        bitrate=None,
        save_cover=stock_save_cover
    ) -> Album:

@@ -197,6 +200,7 @@ class DeeLogin:
        preferences.max_retries = max_retries
        # Audio conversion parameter
        preferences.convert_to = convert_to
        preferences.bitrate = bitrate
        preferences.save_cover = save_cover

        album = DW_ALBUM(preferences).dw()
@@ -218,6 +222,7 @@ class DeeLogin:
        retry_delay_increase=30,
        max_retries=5,
        convert_to=None,
        bitrate=None,
        save_cover=stock_save_cover
    ) -> Playlist:

@@ -263,6 +268,7 @@ class DeeLogin:
        preferences.max_retries = max_retries
        # Audio conversion parameter
        preferences.convert_to = convert_to
        preferences.bitrate = bitrate
        preferences.save_cover = save_cover

        playlist = DW_PLAYLIST(preferences).dw()
@@ -280,6 +286,7 @@ class DeeLogin:
        custom_track_format=None,
        pad_tracks=True,
        convert_to=None,
        bitrate=None,
        save_cover=stock_save_cover
    ) -> list[Track]:

@@ -297,6 +304,7 @@ class DeeLogin:
            custom_track_format=custom_track_format,
            pad_tracks=pad_tracks,
            convert_to=convert_to,
            bitrate=bitrate,
            save_cover=save_cover
        )
        for track in playlist_json
@@ -339,6 +347,7 @@ class DeeLogin:
        retry_delay_increase=30,
        max_retries=5,
        convert_to=None,
        bitrate=None,
        save_cover=stock_save_cover
    ) -> Track:

@@ -358,6 +367,7 @@ class DeeLogin:
            retry_delay_increase=retry_delay_increase,
            max_retries=max_retries,
            convert_to=convert_to,
            bitrate=bitrate,
            save_cover=save_cover
        )

@@ -457,6 +467,7 @@ class DeeLogin:
        retry_delay_increase=30,
        max_retries=5,
        convert_to=None,
        bitrate=None,
        save_cover=stock_save_cover
    ) -> Album:

@@ -474,6 +485,7 @@ class DeeLogin:
            retry_delay_increase=retry_delay_increase,
            max_retries=max_retries,
            convert_to=convert_to,
            bitrate=bitrate,
            save_cover=save_cover
        )

@@ -494,6 +506,7 @@ class DeeLogin:
        retry_delay_increase=30,
        max_retries=5,
        convert_to=None,
        bitrate=None,
        save_cover=stock_save_cover
    ) -> Playlist:

@@ -571,6 +584,7 @@ class DeeLogin:
                retry_delay_increase=retry_delay_increase,
                max_retries=max_retries,
                convert_to=convert_to,
                bitrate=bitrate,
                save_cover=save_cover
            )
            tracks.append(downloaded_track)
@@ -626,6 +640,7 @@ class DeeLogin:
        max_retries=5,
        pad_tracks=True,
        convert_to=None,
        bitrate=None,
        save_cover=stock_save_cover
    ) -> Track:

@@ -659,6 +674,7 @@ class DeeLogin:
            retry_delay_increase=retry_delay_increase,
            max_retries=max_retries,
            convert_to=convert_to,
            bitrate=bitrate,
            save_cover=save_cover
        )

@@ -678,6 +694,8 @@ class DeeLogin:
        initial_retry_delay=30,
        retry_delay_increase=30,
        max_retries=5,
        convert_to=None,
        bitrate=None,
        save_cover=stock_save_cover
    ) -> Episode:

@@ -714,7 +732,12 @@ class DeeLogin:
        preferences.recursive_download = recursive_download
        preferences.not_interface = not_interface
        # No convert_to for episode download (and preferences.convert_to is not set here)
        preferences.max_retries = max_retries
        # Audio conversion parameters
        preferences.convert_to = convert_to
        preferences.bitrate = bitrate
        preferences.save_cover = save_cover
        preferences.is_episode = True

        episode = DW_EPISODE(preferences).dw()

@@ -735,6 +758,7 @@ class DeeLogin:
        retry_delay_increase=30,
        max_retries=5,
        convert_to=None,
        bitrate=None,
        save_cover=stock_save_cover
    ) -> Smart:

@@ -779,6 +803,7 @@ class DeeLogin:
                retry_delay_increase=retry_delay_increase,
                max_retries=max_retries,
                convert_to=convert_to,
                bitrate=bitrate,
                save_cover=save_cover
            )
            smart.type = "track"
@@ -807,6 +832,7 @@ class DeeLogin:
                retry_delay_increase=retry_delay_increase,
                max_retries=max_retries,
                convert_to=convert_to,
                bitrate=bitrate,
                save_cover=save_cover
            )
            smart.type = "album"
@@ -835,6 +861,7 @@ class DeeLogin:
                retry_delay_increase=retry_delay_increase,
                max_retries=max_retries,
                convert_to=convert_to,
                bitrate=bitrate,
                save_cover=save_cover
            )
            smart.type = "playlist"

@@ -15,6 +15,7 @@ AUDIO_FORMATS = {
|
||||
"extension": ".mp3",
|
||||
"mime": "audio/mpeg",
|
||||
"ffmpeg_codec": "libmp3lame",
|
||||
"ffmpeg_format_flag": "mp3",
|
||||
"default_bitrate": "320k",
|
||||
"bitrates": ["32k", "64k", "96k", "128k", "192k", "256k", "320k"],
|
||||
},
|
||||
@@ -22,6 +23,7 @@ AUDIO_FORMATS = {
|
||||
"extension": ".m4a",
|
||||
"mime": "audio/mp4",
|
||||
"ffmpeg_codec": "aac",
|
||||
"ffmpeg_format_flag": "ipod",
|
||||
"default_bitrate": "256k",
|
||||
"bitrates": ["32k", "64k", "96k", "128k", "192k", "256k"],
|
||||
},
|
||||
@@ -29,6 +31,7 @@ AUDIO_FORMATS = {
|
||||
"extension": ".ogg",
|
||||
"mime": "audio/ogg",
|
||||
"ffmpeg_codec": "libvorbis",
|
||||
"ffmpeg_format_flag": "ogg",
|
||||
"default_bitrate": "256k",
|
||||
"bitrates": ["64k", "96k", "128k", "192k", "256k", "320k"],
|
||||
},
|
||||
@@ -36,6 +39,7 @@ AUDIO_FORMATS = {
|
||||
"extension": ".opus",
|
||||
"mime": "audio/opus",
|
||||
"ffmpeg_codec": "libopus",
|
||||
"ffmpeg_format_flag": "opus",
|
||||
"default_bitrate": "128k",
|
||||
"bitrates": ["32k", "64k", "96k", "128k", "192k", "256k"],
|
||||
},
|
||||
@@ -43,6 +47,7 @@ AUDIO_FORMATS = {
|
||||
"extension": ".flac",
|
||||
"mime": "audio/flac",
|
||||
"ffmpeg_codec": "flac",
|
||||
"ffmpeg_format_flag": "flac",
|
||||
"default_bitrate": None, # Lossless, no bitrate needed
|
||||
"bitrates": [],
|
||||
},
|
||||
@@ -50,6 +55,7 @@ AUDIO_FORMATS = {
|
||||
"extension": ".wav",
|
||||
"mime": "audio/wav",
|
||||
"ffmpeg_codec": "pcm_s16le",
|
||||
"ffmpeg_format_flag": "wav",
|
||||
"default_bitrate": None, # Lossless, no bitrate needed
|
||||
"bitrates": [],
|
||||
},
|
||||
@@ -57,6 +63,7 @@ AUDIO_FORMATS = {
|
||||
"extension": ".m4a",
|
||||
"mime": "audio/mp4",
|
||||
"ffmpeg_codec": "alac",
|
||||
"ffmpeg_format_flag": "ipod",
|
||||
"default_bitrate": None, # Lossless, no bitrate needed
|
||||
"bitrates": [],
|
||||
}
|
||||
@@ -69,50 +76,6 @@ def check_ffmpeg_available():
|
||||
return False
|
||||
return True
|
||||
|
||||
def parse_format_string(format_string):
|
||||
"""
|
||||
Parse a format string like "MP3_320" into (format, bitrate).
|
||||
Returns (format_name, bitrate) or (None, None) if invalid.
|
||||
"""
|
||||
if not format_string or format_string.lower() == "false":
|
||||
return None, None
|
||||
|
||||
# Check for format with bitrate specification
|
||||
format_match = re.match(r"^([A-Za-z]+)(?:_(\d+[kK]))?$", format_string)
|
||||
if format_match:
|
||||
format_name = format_match.group(1).upper()
|
||||
bitrate = format_match.group(2)
|
||||
|
||||
# Validate format name
|
||||
if format_name not in AUDIO_FORMATS:
|
||||
logger.warning(f"Unknown audio format: {format_name}. Using original format.")
|
||||
return None, None
|
||||
|
||||
# If format is lossless but bitrate was specified, log a warning
|
||||
if bitrate and AUDIO_FORMATS[format_name]["default_bitrate"] is None:
|
||||
logger.warning(f"Bitrate specified for lossless format {format_name}. Ignoring bitrate.")
|
||||
bitrate = None
|
||||
|
||||
# If bitrate wasn't specified, use default
|
||||
if not bitrate and AUDIO_FORMATS[format_name]["default_bitrate"]:
|
||||
bitrate = AUDIO_FORMATS[format_name]["default_bitrate"]
|
||||
|
||||
# Validate bitrate if specified
|
||||
if bitrate and AUDIO_FORMATS[format_name]["bitrates"] and bitrate.lower() not in [b.lower() for b in AUDIO_FORMATS[format_name]["bitrates"]]:
|
||||
logger.warning(f"Invalid bitrate {bitrate} for {format_name}. Using default {AUDIO_FORMATS[format_name]['default_bitrate']}.")
|
||||
bitrate = AUDIO_FORMATS[format_name]["default_bitrate"]
|
||||
|
||||
return format_name, bitrate
|
||||
|
||||
# Simple format name without bitrate
|
||||
if format_string.upper() in AUDIO_FORMATS:
|
||||
format_name = format_string.upper()
|
||||
bitrate = AUDIO_FORMATS[format_name]["default_bitrate"]
|
||||
return format_name, bitrate
|
||||
|
||||
logger.warning(f"Invalid format specification: {format_string}. Using original format.")
|
||||
return None, None
|
||||
|
||||
def get_output_path(input_path, format_name):
|
||||
"""Get the output path with the new extension based on the format."""
|
||||
if not format_name or format_name not in AUDIO_FORMATS:
|
||||
@@ -155,7 +118,7 @@ def convert_audio(input_path, format_name=None, bitrate=None, register_func=None
|
||||
Args:
|
||||
input_path: Path to the input audio file
|
||||
format_name: Target format name (e.g., 'MP3', 'OGG', 'FLAC')
|
||||
bitrate: Target bitrate (e.g., '320k', '128k')
|
||||
bitrate: Target bitrate (e.g., '320k', '128k'). If None, uses default for lossy formats.
|
||||
register_func: Function to register a file as being actively downloaded
|
||||
unregister_func: Function to unregister a file from the active downloads list
|
||||
|
||||
@@ -176,21 +139,45 @@ def convert_audio(input_path, format_name=None, bitrate=None, register_func=None
|
||||
return input_path
|
||||
|
||||
# Validate format and get format details
|
||||
if format_name not in AUDIO_FORMATS:
|
||||
format_name_upper = format_name.upper()
|
||||
if format_name_upper not in AUDIO_FORMATS:
|
||||
logger.warning(f"Unknown format: {format_name}. Using original format.")
|
||||
return input_path
|
||||
|
||||
format_details = AUDIO_FORMATS[format_name]
|
||||
format_details = AUDIO_FORMATS[format_name_upper]
|
||||
|
||||
# Skip conversion if the file is already in the target format
|
||||
# Determine effective bitrate
|
||||
effective_bitrate = bitrate
|
||||
if format_details["default_bitrate"] is not None: # Lossy format
|
||||
if effective_bitrate:
|
||||
# Validate provided bitrate
|
||||
if effective_bitrate.lower() not in [b.lower() for b in format_details["bitrates"]]:
|
||||
logger.warning(f"Invalid bitrate {effective_bitrate} for {format_name_upper}. Using default {format_details['default_bitrate']}.")
|
||||
effective_bitrate = format_details["default_bitrate"]
|
||||
else: # No bitrate provided for lossy format, use default
|
||||
effective_bitrate = format_details["default_bitrate"]
|
||||
elif effective_bitrate: # Lossless format but bitrate was specified
|
||||
logger.warning(f"Bitrate specified for lossless format {format_name_upper}. Ignoring bitrate.")
|
||||
effective_bitrate = None
|
||||
|
||||
# Skip conversion if the file is already in the target format and bitrate matches (or not applicable)
|
||||
if input_path.lower().endswith(format_details["extension"].lower()):
|
||||
# Only do conversion if a specific bitrate is requested
|
||||
if not bitrate or format_details["default_bitrate"] is None:
|
||||
logger.info(f"File {input_path} is already in {format_name} format. Skipping conversion.")
|
||||
return input_path
|
||||
# For lossless, or if effective_bitrate matches (or no specific bitrate needed for format)
|
||||
if format_details["default_bitrate"] is None: # Lossless
|
||||
logger.info(f"File {input_path} is already in {format_name_upper} (lossless) format. Skipping conversion.")
|
||||
return input_path
|
||||
# For lossy formats an effective_bitrate is always set above, so this branch is only
|
||||
# reached when no bitrate applies; it mirrors the original behaviour, where a file with
|
||||
# a matching extension was re-encoded only if a bitrate was specified. If no effective
|
||||
# bitrate is needed and the extension already matches, skip re-encoding.
|
||||
if not effective_bitrate and format_details["default_bitrate"] is not None:
|
||||
logger.info(f"File {input_path} is already in {format_name_upper} format with a suitable bitrate. Skipping conversion.")
|
||||
return input_path
|
||||
|
||||
# Get the output path
|
||||
output_path = get_output_path(input_path, format_name)
|
||||
output_path = get_output_path(input_path, format_name_upper)
|
||||
|
||||
# Use a temporary file for the conversion to avoid conflicts
|
||||
temp_output = output_path + ".tmp"
|
||||
@@ -201,24 +188,28 @@ def convert_audio(input_path, format_name=None, bitrate=None, register_func=None
|
||||
try:
|
||||
cmd = ["ffmpeg", "-y", "-hide_banner", "-loglevel", "error", "-i", input_path]
|
||||
|
||||
# Add bitrate parameter for lossy formats
|
||||
if bitrate and format_details["bitrates"]:
|
||||
cmd.extend(["-b:a", bitrate])
|
||||
# Add bitrate parameter for lossy formats if an effective_bitrate is set
|
||||
if effective_bitrate and format_details["bitrates"]: # format_details["bitrates"] implies lossy
|
||||
cmd.extend(["-b:a", effective_bitrate])
|
||||
|
||||
# Add codec parameter
|
||||
cmd.extend(["-c:a", format_details["ffmpeg_codec"]])
|
||||
|
||||
# Add format flag
|
||||
if "ffmpeg_format_flag" in format_details:
|
||||
cmd.extend(["-f", format_details["ffmpeg_format_flag"]])
|
||||
|
||||
# For some formats, add additional parameters
|
||||
if format_name == "MP3":
|
||||
if format_name_upper == "MP3":
|
||||
# Use high quality settings for MP3
|
||||
if not bitrate or int(bitrate.replace('k', '')) >= 256:
|
||||
if not effective_bitrate or int(effective_bitrate.replace('k', '')) >= 256:
|
||||
cmd.extend(["-q:a", "0"])
|
||||
|
||||
# Add output file
|
||||
cmd.append(temp_output)
|
||||
|
||||
# Run the conversion
|
||||
logger.info(f"Converting {input_path} to {format_name}" + (f" at {bitrate}" if bitrate else ""))
|
||||
logger.info(f"Converting {input_path} to {format_name_upper}" + (f" at {effective_bitrate}" if effective_bitrate else ""))
|
||||
process = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
||||
|
||||
if process.returncode != 0:
|
||||
@@ -240,7 +231,7 @@ def convert_audio(input_path, format_name=None, bitrate=None, register_func=None
|
||||
os.remove(input_path)
|
||||
unregister_active_download(input_path)
|
||||
|
||||
logger.info(f"Successfully converted to {format_name}" + (f" at {bitrate}" if bitrate else ""))
|
||||
logger.info(f"Successfully converted to {format_name_upper}" + (f" at {effective_bitrate}" if effective_bitrate else ""))
|
||||
return output_path
|
||||
|
||||
except Exception as e:
|
||||
|
||||
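# Editor's sketch (not part of the diff): with the command construction above, a call such
# as convert_audio("song.ogg", "MP3", "320k", ...) would be expected to run roughly:
#
#   ffmpeg -y -hide_banner -loglevel error -i song.ogg \
#          -b:a 320k -c:a libmp3lame -f mp3 -q:a 0 song.mp3.tmp
#
# The codec and format flag come from AUDIO_FORMATS["MP3"]; "libmp3lame" and "mp3" are
# assumptions here, as is the final rename of the .tmp file onto song.mp3 after ffmpeg
# exits successfully (the rename is outside the visible hunks).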
64
deezspot/libutils/cleanup_utils.py
Normal file
@@ -0,0 +1,64 @@
|
||||
import os
|
||||
import sys
|
||||
import signal
|
||||
import atexit
|
||||
from deezspot.libutils.logging_utils import logger
|
||||
|
||||
# --- Global tracking of active downloads ---
|
||||
ACTIVE_DOWNLOADS = set()
|
||||
CLEANUP_LOCK = False
|
||||
CURRENT_DOWNLOAD = None
|
||||
|
||||
def register_active_download(file_path):
|
||||
"""Register a file as being actively downloaded"""
|
||||
global CURRENT_DOWNLOAD
|
||||
ACTIVE_DOWNLOADS.add(file_path)
|
||||
CURRENT_DOWNLOAD = file_path
|
||||
|
||||
def unregister_active_download(file_path):
|
||||
"""Remove a file from the active downloads list"""
|
||||
global CURRENT_DOWNLOAD
|
||||
if file_path in ACTIVE_DOWNLOADS:
|
||||
ACTIVE_DOWNLOADS.remove(file_path)
|
||||
if CURRENT_DOWNLOAD == file_path:
|
||||
CURRENT_DOWNLOAD = None
|
||||
|
||||
def cleanup_active_downloads():
|
||||
"""Clean up any incomplete downloads during process termination"""
|
||||
global CLEANUP_LOCK, CURRENT_DOWNLOAD
|
||||
if CLEANUP_LOCK:
|
||||
return
|
||||
|
||||
CLEANUP_LOCK = True
|
||||
# Only remove the file that was in progress when stopped
|
||||
if CURRENT_DOWNLOAD:
|
||||
try:
|
||||
if os.path.exists(CURRENT_DOWNLOAD):
|
||||
logger.info(f"Removing incomplete download: {CURRENT_DOWNLOAD}")
|
||||
os.remove(CURRENT_DOWNLOAD)
|
||||
# No need to call unregister_active_download here,
|
||||
# as the process is terminating.
|
||||
except Exception as e:
|
||||
logger.error(f"Error cleaning up file {CURRENT_DOWNLOAD}: {str(e)}")
|
||||
CLEANUP_LOCK = False
|
||||
|
||||
# Register the cleanup function to run on exit
|
||||
atexit.register(cleanup_active_downloads)
|
||||
|
||||
# Set up signal handlers
|
||||
def signal_handler(sig, frame):
|
||||
logger.info(f"Received termination signal {sig}. Cleaning up...")
|
||||
cleanup_active_downloads()
|
||||
if sig == signal.SIGINT:
|
||||
logger.info("CTRL+C received. Exiting...")
|
||||
sys.exit(0)
|
||||
|
||||
# Register signal handlers for common termination signals
|
||||
signal.signal(signal.SIGINT, signal_handler) # CTRL+C
|
||||
signal.signal(signal.SIGTERM, signal_handler) # Normal termination
|
||||
try:
|
||||
# These may not be available on all platforms
|
||||
signal.signal(signal.SIGHUP, signal_handler) # Terminal closed
|
||||
signal.signal(signal.SIGQUIT, signal_handler) # CTRL+\
|
||||
except AttributeError:
|
||||
pass
|
||||
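# --- Editor's sketch (not part of the diff): typical usage of cleanup_utils ---
# Register before the first byte is written and unregister once the file is complete,
# so a SIGINT/SIGTERM mid-write only ever deletes the one in-progress file.
# The helper below is hypothetical, for illustration only.
#
#   from deezspot.libutils.cleanup_utils import (
#       register_active_download,
#       unregister_active_download,
#   )
#
#   def _example_write_stream_to_file(data: bytes, path: str) -> str:
#       register_active_download(path)      # cleanup target if the process dies mid-write
#       with open(path, "wb") as f:
#           f.write(data)
#       unregister_active_download(path)    # complete; nothing for atexit cleanup to remove
#       return path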
159
deezspot/libutils/skip_detection.py
Normal file
@@ -0,0 +1,159 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
import os
|
||||
from mutagen import File
|
||||
from mutagen.easyid3 import EasyID3
|
||||
from mutagen.oggvorbis import OggVorbis
|
||||
from mutagen.flac import FLAC
|
||||
# from mutagen.mp4 import MP4 # MP4 is usually handled by File for .m4a
|
||||
|
||||
# AUDIO_FORMATS and get_output_path will be imported from audio_converter
|
||||
# This is a direct import; it would only become a problem if audio_converter ever
|
||||
# imported (directly or indirectly) from this module, creating a circular dependency.
|
||||
from deezspot.libutils.audio_converter import AUDIO_FORMATS, get_output_path
|
||||
|
||||
# Logger instance will be passed as an argument to functions that need it.
|
||||
|
||||
def read_metadata_from_file(file_path, logger):
|
||||
"""Reads title and album metadata from an audio file."""
|
||||
try:
|
||||
if not os.path.isfile(file_path):
|
||||
logger.debug(f"File not found for metadata reading: {file_path}")
|
||||
return None, None
|
||||
|
||||
audio = File(file_path, easy=False) # easy=False to access format-specific tags better
|
||||
if audio is None:
|
||||
logger.warning(f"Could not load audio file with mutagen: {file_path}")
|
||||
return None, None
|
||||
|
||||
title = None
|
||||
album = None
|
||||
|
||||
if isinstance(audio, EasyID3): # MP3
|
||||
title = audio.get('title', [None])[0]
|
||||
album = audio.get('album', [None])[0]
|
||||
elif isinstance(audio, OggVorbis): # OGG
|
||||
title = audio.get('TITLE', [None])[0] # Vorbis tags are case-insensitive but typically uppercase
|
||||
album = audio.get('ALBUM', [None])[0]
|
||||
elif isinstance(audio, FLAC): # FLAC
|
||||
title = audio.get('TITLE', [None])[0]
|
||||
album = audio.get('ALBUM', [None])[0]
|
||||
elif file_path.lower().endswith('.m4a'): # M4A (AAC/ALAC)
|
||||
# Mutagen's File(filepath) for .m4a returns an MP4 object
|
||||
title = audio.get('\xa9nam', [None])[0] # iTunes title tag
|
||||
album = audio.get('\xa9alb', [None])[0] # iTunes album tag
|
||||
else:
|
||||
logger.warning(f"Unsupported file type for metadata extraction by read_metadata_from_file: {file_path} (type: {type(audio)})")
|
||||
return None, None
|
||||
|
||||
return title, album
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error reading metadata from {file_path}: {str(e)}")
|
||||
return None, None
|
||||
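# Editor's sketch (not part of the diff): duplicate detection compares the (title, album)
# pair read above against the metadata of the track about to be downloaded. Path and tag
# values below are illustrative only.
#
#   title, album = read_metadata_from_file("/music/Artist/Album/01 - Song.flac", logger)
#   if (title, album) == (expected_title, expected_album):
#       ...  # treat as already downloaded and skip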
|
||||
def check_track_exists(original_song_path, title, album, convert_to, logger):
|
||||
"""Checks if a track exists, considering original and target converted formats.
|
||||
|
||||
Args:
|
||||
original_song_path (str): The expected path for the song in its original download format.
|
||||
title (str): The title of the track to check.
|
||||
album (str): The album of the track to check.
|
||||
convert_to (str | None): The target format for conversion (e.g., 'MP3', 'FLAC'), or None.
|
||||
logger (logging.Logger): Logger instance.
|
||||
|
||||
Returns:
|
||||
tuple[bool, str | None]: (True, path_to_existing_file) if exists, else (False, None).
|
||||
"""
|
||||
scan_dir = os.path.dirname(original_song_path)
|
||||
|
||||
if not os.path.exists(scan_dir):
|
||||
logger.debug(f"Scan directory {scan_dir} does not exist. Track cannot exist.")
|
||||
return False, None
|
||||
|
||||
# Priority 1: Check if the file exists in the target converted format
|
||||
if convert_to:
|
||||
target_format_upper = convert_to.upper()
|
||||
if target_format_upper in AUDIO_FORMATS:
|
||||
final_expected_converted_path = get_output_path(original_song_path, target_format_upper)
|
||||
final_target_ext = AUDIO_FORMATS[target_format_upper]["extension"].lower()
|
||||
|
||||
# Check exact predicted path for converted file
|
||||
if os.path.exists(final_expected_converted_path):
|
||||
existing_title, existing_album = read_metadata_from_file(final_expected_converted_path, logger)
|
||||
if existing_title == title and existing_album == album:
|
||||
logger.info(f"Found existing track (exact converted path match): {title} - {album} at {final_expected_converted_path}")
|
||||
return True, final_expected_converted_path
|
||||
|
||||
# Scan directory for other files with the target extension
|
||||
for file_in_dir in os.listdir(scan_dir):
|
||||
if file_in_dir.lower().endswith(final_target_ext):
|
||||
file_path_to_check = os.path.join(scan_dir, file_in_dir)
|
||||
# Skip the exact predicted converted path: it was already checked above and did not match
|
||||
if file_path_to_check == final_expected_converted_path and os.path.exists(final_expected_converted_path):
|
||||
continue
|
||||
existing_title, existing_album = read_metadata_from_file(file_path_to_check, logger)
|
||||
if existing_title == title and existing_album == album:
|
||||
logger.info(f"Found existing track (converted extension scan): {title} - {album} at {file_path_to_check}")
|
||||
return True, file_path_to_check
|
||||
|
||||
# If conversion is specified, and we didn't find the converted file, we should not report other formats as existing.
|
||||
# The intention is to get the file in the `convert_to` format.
|
||||
return False, None
|
||||
else:
|
||||
logger.warning(f"Invalid convert_to format: '{convert_to}'. Checking for original/general format.")
|
||||
# Fall through to check original/general if convert_to was invalid
|
||||
|
||||
# Priority 2: Check if the file exists in its original download format
|
||||
original_ext_lower = os.path.splitext(original_song_path)[1].lower()
|
||||
|
||||
if os.path.exists(original_song_path):
|
||||
existing_title, existing_album = read_metadata_from_file(original_song_path, logger)
|
||||
if existing_title == title and existing_album == album:
|
||||
logger.info(f"Found existing track (exact original path match): {title} - {album} at {original_song_path}")
|
||||
return True, original_song_path
|
||||
|
||||
# Scan directory for other files with the original extension (if no conversion target)
|
||||
for file_in_dir in os.listdir(scan_dir):
|
||||
if file_in_dir.lower().endswith(original_ext_lower):
|
||||
file_path_to_check = os.path.join(scan_dir, file_in_dir)
|
||||
if file_path_to_check == original_song_path: # Already checked this one
|
||||
continue
|
||||
existing_title, existing_album = read_metadata_from_file(file_path_to_check, logger)
|
||||
if existing_title == title and existing_album == album:
|
||||
logger.info(f"Found existing track (original extension scan): {title} - {album} at {file_path_to_check}")
|
||||
return True, file_path_to_check
|
||||
|
||||
# Priority 3: General scan for any known audio format if no conversion was specified OR if convert_to was invalid
|
||||
# This part only runs if convert_to is None or was an invalid format string.
|
||||
if not convert_to or (convert_to and convert_to.upper() not in AUDIO_FORMATS):
|
||||
for file_in_dir in os.listdir(scan_dir):
|
||||
file_lower = file_in_dir.lower()
|
||||
# Check against all known audio format extensions
|
||||
is_known_audio_format = False
|
||||
for fmt_details in AUDIO_FORMATS.values():
|
||||
if file_lower.endswith(fmt_details["extension"].lower()):
|
||||
is_known_audio_format = True
|
||||
break
|
||||
|
||||
if is_known_audio_format:
|
||||
# Files with the original extension were already scanned in Priority 2, but they are not skipped outright here
|
||||
if file_lower.endswith(original_ext_lower):
|
||||
# Priority 2 already checked the exact original_song_path and every file with the
|
||||
# original extension, so anything matching would have been returned there. Files that
|
||||
# reach this point either failed the metadata comparison or are the original path
|
||||
# itself (excluded just below), so letting them fall through is harmless.
|
||||
pass # Let it proceed to metadata check if it wasn't an exact match path-wise
|
||||
|
||||
file_path_to_check = os.path.join(scan_dir, file_in_dir)
|
||||
# Avoid re-checking original_song_path if it exists, it was covered by Priority 2's exact match.
|
||||
if os.path.exists(original_song_path) and file_path_to_check == original_song_path:
|
||||
continue
|
||||
|
||||
existing_title, existing_album = read_metadata_from_file(file_path_to_check, logger)
|
||||
if existing_title == title and existing_album == album:
|
||||
logger.info(f"Found existing track (general audio format scan): {title} - {album} at {file_path_to_check}")
|
||||
return True, file_path_to_check
|
||||
|
||||
return False, None
|
||||
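# Editor's note (not part of the diff): the lookup order implemented above is
#   1. the exact predicted path in the convert_to format, then any file in the same
#      directory with that format's extension;
#   2. the exact original download path, then any file with the original extension;
#   3. any file with a known audio extension (only when convert_to is absent or invalid).
# A hedged usage sketch, with illustrative paths and tag values:
#
#   exists, path = check_track_exists(
#       original_song_path="/music/Artist/Album/01 - Song.ogg",
#       title="Song", album="Album", convert_to="MP3", logger=logger,
#   )
#   if exists:
#       ...  # skip download and conversion; reuse `path`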
@@ -2,9 +2,6 @@ import traceback
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import signal
|
||||
import atexit
|
||||
import sys
|
||||
from copy import deepcopy
|
||||
from os.path import isfile, dirname
|
||||
from librespot.core import Session
|
||||
@@ -14,7 +11,7 @@ from deezspot.spotloader.spotify_settings import qualities
|
||||
from deezspot.libutils.others_settings import answers
|
||||
from deezspot.__taggers__ import write_tags, check_track
|
||||
from librespot.audio.decoders import AudioQuality, VorbisOnlyAudioQuality
|
||||
from deezspot.libutils.audio_converter import convert_audio, parse_format_string
|
||||
from deezspot.libutils.audio_converter import convert_audio, AUDIO_FORMATS, get_output_path
|
||||
from os import (
|
||||
remove,
|
||||
system,
|
||||
@@ -35,74 +32,19 @@ from deezspot.libutils.utils import (
|
||||
save_cover_image,
|
||||
__get_dir as get_album_directory,
|
||||
)
|
||||
from mutagen import File
|
||||
from mutagen.easyid3 import EasyID3
|
||||
from mutagen.oggvorbis import OggVorbis
|
||||
from mutagen.flac import FLAC
|
||||
from mutagen.mp4 import MP4
|
||||
from deezspot.libutils.logging_utils import logger
|
||||
from deezspot.libutils.cleanup_utils import (
|
||||
register_active_download,
|
||||
unregister_active_download,
|
||||
)
|
||||
from deezspot.libutils.skip_detection import check_track_exists
|
||||
|
||||
# --- Global retry counter variables ---
|
||||
GLOBAL_RETRY_COUNT = 0
|
||||
GLOBAL_MAX_RETRIES = 100 # Adjust this value as needed
|
||||
|
||||
# --- Global tracking of active downloads ---
|
||||
ACTIVE_DOWNLOADS = set()
|
||||
CLEANUP_LOCK = False
|
||||
CURRENT_DOWNLOAD = None
|
||||
|
||||
def register_active_download(file_path):
|
||||
"""Register a file as being actively downloaded"""
|
||||
global CURRENT_DOWNLOAD
|
||||
ACTIVE_DOWNLOADS.add(file_path)
|
||||
CURRENT_DOWNLOAD = file_path
|
||||
|
||||
def unregister_active_download(file_path):
|
||||
"""Remove a file from the active downloads list"""
|
||||
global CURRENT_DOWNLOAD
|
||||
if file_path in ACTIVE_DOWNLOADS:
|
||||
ACTIVE_DOWNLOADS.remove(file_path)
|
||||
if CURRENT_DOWNLOAD == file_path:
|
||||
CURRENT_DOWNLOAD = None
|
||||
|
||||
def cleanup_active_downloads():
|
||||
"""Clean up any incomplete downloads during process termination"""
|
||||
global CLEANUP_LOCK, CURRENT_DOWNLOAD
|
||||
if CLEANUP_LOCK:
|
||||
return
|
||||
|
||||
CLEANUP_LOCK = True
|
||||
# Only remove the file that was in progress when stopped
|
||||
if CURRENT_DOWNLOAD:
|
||||
try:
|
||||
if os.path.exists(CURRENT_DOWNLOAD):
|
||||
logger.info(f"Removing incomplete download: {CURRENT_DOWNLOAD}")
|
||||
os.remove(CURRENT_DOWNLOAD)
|
||||
unregister_active_download(CURRENT_DOWNLOAD)
|
||||
except Exception as e:
|
||||
logger.error(f"Error cleaning up file {CURRENT_DOWNLOAD}: {str(e)}")
|
||||
CLEANUP_LOCK = False
|
||||
|
||||
# Register the cleanup function to run on exit
|
||||
atexit.register(cleanup_active_downloads)
|
||||
|
||||
# Set up signal handlers
|
||||
def signal_handler(sig, frame):
|
||||
logger.info(f"Received termination signal {sig}. Cleaning up...")
|
||||
cleanup_active_downloads()
|
||||
if sig == signal.SIGINT:
|
||||
logger.info("CTRL+C received. Exiting...")
|
||||
sys.exit(0)
|
||||
|
||||
# Register signal handlers for common termination signals
|
||||
signal.signal(signal.SIGINT, signal_handler) # CTRL+C
|
||||
signal.signal(signal.SIGTERM, signal_handler) # Normal termination
|
||||
try:
|
||||
# These may not be available on all platforms
|
||||
signal.signal(signal.SIGHUP, signal_handler) # Terminal closed
|
||||
signal.signal(signal.SIGQUIT, signal_handler) # CTRL+\
|
||||
except AttributeError:
|
||||
pass
|
||||
# Moved to deezspot.libutils.cleanup_utils
|
||||
|
||||
class Download_JOB:
|
||||
session = None
|
||||
@@ -145,6 +87,11 @@ class EASY_DW:
|
||||
self.__type = "episode" if preferences.is_episode else "track" # New type parameter
|
||||
self.__real_time_dl = preferences.real_time_dl
|
||||
self.__convert_to = getattr(preferences, 'convert_to', None)
|
||||
self.__bitrate = getattr(preferences, 'bitrate', None) # New bitrate attribute
|
||||
|
||||
# Ensure if convert_to is None, bitrate is also None
|
||||
if self.__convert_to is None:
|
||||
self.__bitrate = None
|
||||
|
||||
self.__c_quality = qualities[self.__quality_download]
|
||||
self.__fallback_ids = self.__ids
|
||||
@@ -240,24 +187,32 @@ class EASY_DW:
|
||||
# Step 2: Convert to requested format if specified (e.g., MP3, FLAC)
|
||||
conversion_to_another_format_occurred_and_cleared_state = False
|
||||
if self.__convert_to:
|
||||
format_name, bitrate = parse_format_string(self.__convert_to)
|
||||
format_name = self.__convert_to
|
||||
bitrate = self.__bitrate
|
||||
if format_name:
|
||||
try:
|
||||
# convert_audio is expected to handle its own input/output registration/unregistration.
|
||||
# Input to convert_audio is self.__song_path (the .ogg path).
|
||||
# On success, convert_audio should unregister its input and its output,
|
||||
# leaving CURRENT_DOWNLOAD as None.
|
||||
path_before_final_conversion = self.__song_path # Current path, e.g., .ogg
|
||||
converted_path = convert_audio(
|
||||
self.__song_path, # Current .ogg path
|
||||
path_before_final_conversion,
|
||||
format_name,
|
||||
bitrate,
|
||||
register_active_download,
|
||||
unregister_active_download
|
||||
)
|
||||
if converted_path != self.__song_path:
|
||||
# Update the path to the converted file
|
||||
self.__song_path = converted_path
|
||||
self.__c_track.song_path = converted_path # Ensure track object has the final path
|
||||
if converted_path != path_before_final_conversion:
|
||||
# Conversion to a new format happened and path changed
|
||||
self.__song_path = converted_path # Update EASY_DW's current song path
|
||||
|
||||
current_object_path_attr_name = 'song_path' if self.__type == "track" else 'episode_path'
|
||||
current_media_object = self.__c_track if self.__type == "track" else self.__c_episode
|
||||
|
||||
if current_media_object:
|
||||
setattr(current_media_object, current_object_path_attr_name, converted_path)
|
||||
_, new_ext = os.path.splitext(converted_path)
|
||||
if new_ext:
|
||||
current_media_object.file_format = new_ext.lower()
|
||||
# Also update EASY_DW's internal __file_format
|
||||
self.__file_format = new_ext.lower()
|
||||
|
||||
conversion_to_another_format_occurred_and_cleared_state = True
|
||||
except Exception as conv_error:
|
||||
@@ -266,8 +221,6 @@ class EASY_DW:
|
||||
# We want to keep it, so CURRENT_DOWNLOAD should remain set to this .ogg path.
|
||||
logger.error(f"Audio conversion to {format_name} error: {str(conv_error)}")
|
||||
# conversion_to_another_format_occurred_and_cleared_state remains False.
|
||||
# else: format_name was None after parsing __convert_to. No specific conversion attempt.
|
||||
# conversion_to_another_format_occurred_and_cleared_state remains False.
|
||||
|
||||
# If no conversion to another format was requested, or if it was requested but didn't effectively run
|
||||
# (e.g. format_name was None), or if convert_audio failed to clear state (which would be its bug),
|
||||
@@ -292,6 +245,7 @@ class EASY_DW:
|
||||
# Re-throw the exception. If a file (like og_song_path_for_ogg_output) was registered
|
||||
# and an error occurred, it remains registered for atexit cleanup, which is intended.
|
||||
raise e
|
||||
|
||||
def get_no_dw_track(self) -> Track:
|
||||
return self.__c_track
|
||||
|
||||
@@ -346,95 +300,60 @@ class EASY_DW:
|
||||
if hasattr(self, '_EASY_DW__c_track') and self.__c_track and self.__c_track.success:
|
||||
write_tags(self.__c_track)
|
||||
|
||||
# Unregister the final successful file path after all operations are done.
|
||||
# self.__c_track.song_path would have been updated by __convert_audio__ if conversion occurred.
|
||||
unregister_active_download(self.__c_track.song_path)
|
||||
|
||||
return self.__c_track
|
||||
|
||||
def track_exists(self, title, album):
|
||||
try:
|
||||
# Ensure the final song path is set
|
||||
if not hasattr(self, '_EASY_DW__song_path') or not self.__song_path:
|
||||
self.__set_song_path()
|
||||
|
||||
# Use only the final directory for scanning
|
||||
final_dir = os.path.dirname(self.__song_path)
|
||||
|
||||
# If the final directory doesn't exist, there are no files to check
|
||||
if not os.path.exists(final_dir):
|
||||
return False
|
||||
|
||||
# Iterate over files only in the final directory
|
||||
for file in os.listdir(final_dir):
|
||||
if file.lower().endswith(('.mp3', '.ogg', '.flac', '.wav', '.m4a', '.opus')):
|
||||
file_path = os.path.join(final_dir, file)
|
||||
existing_title, existing_album = self.read_metadata(file_path)
|
||||
if existing_title == title and existing_album == album:
|
||||
logger.info(f"Found existing track: {title} - {album}")
|
||||
return True
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error(f"Error checking if track exists: {str(e)}")
|
||||
return False
|
||||
|
||||
def read_metadata(self, file_path):
|
||||
try:
|
||||
if not os.path.isfile(file_path):
|
||||
return None, None
|
||||
audio = File(file_path)
|
||||
if audio is None:
|
||||
return None, None
|
||||
title = None
|
||||
album = None
|
||||
if file_path.endswith('.mp3'):
|
||||
try:
|
||||
audio = EasyID3(file_path)
|
||||
title = audio.get('title', [None])[0]
|
||||
album = audio.get('album', [None])[0]
|
||||
except Exception as e:
|
||||
logger.error(f"Error reading MP3 metadata: {str(e)}")
|
||||
elif file_path.endswith('.ogg'):
|
||||
audio = OggVorbis(file_path)
|
||||
title = audio.get('title', [None])[0]
|
||||
album = audio.get('album', [None])[0]
|
||||
elif file_path.endswith('.flac'):
|
||||
audio = FLAC(file_path)
|
||||
title = audio.get('title', [None])[0]
|
||||
album = audio.get('album', [None])[0]
|
||||
elif file_path.endswith('.m4a'):
|
||||
audio = MP4(file_path)
|
||||
title = audio.get('\xa9nam', [None])[0]
|
||||
album = audio.get('\xa9alb', [None])[0]
|
||||
else:
|
||||
return None, None
|
||||
return title, album
|
||||
except Exception as e:
|
||||
logger.error(f"Error reading metadata from {file_path}: {str(e)}")
|
||||
return None, None
|
||||
|
||||
def download_try(self) -> Track:
|
||||
current_title = self.__song_metadata.get('music')
|
||||
current_album = self.__song_metadata.get('album')
|
||||
current_artist = self.__song_metadata.get('artist')
|
||||
|
||||
if self.track_exists(current_title, current_album):
|
||||
# Create skipped progress report using new format
|
||||
# Call the new check_track_exists function from skip_detection.py
|
||||
# It needs: original_song_path, title, album, convert_to, logger
|
||||
# self.__song_path is the original_song_path before any conversion attempts by this specific download operation.
|
||||
# self.__preferences.convert_to is the convert_to parameter.
|
||||
# logger is available as a global import in this module.
|
||||
exists, existing_file_path = check_track_exists(
|
||||
original_song_path=self.__song_path,
|
||||
title=current_title,
|
||||
album=current_album,
|
||||
convert_to=self.__preferences.convert_to,
|
||||
logger=logger # Pass the logger instance
|
||||
)
|
||||
|
||||
if exists and existing_file_path:
|
||||
logger.info(f"Track '{current_title}' by '{current_artist}' already exists at '{existing_file_path}'. Skipping download and conversion.")
|
||||
# Update the track object to point to the existing file
|
||||
self.__c_track.song_path = existing_file_path
|
||||
_, new_ext = os.path.splitext(existing_file_path)
|
||||
self.__c_track.file_format = new_ext.lower() # Ensure it's just the extension like '.mp3'
|
||||
# self.__c_track.song_quality might need re-evaluation if we could determine quality of existing file
|
||||
# For now, assume if it exists in target format, its quality is acceptable.
|
||||
|
||||
self.__c_track.success = True # Mark as success because the desired file is available
|
||||
self.__c_track.was_skipped = True
|
||||
|
||||
progress_data = {
|
||||
"type": "track",
|
||||
"song": current_title,
|
||||
"artist": current_artist,
|
||||
"status": "skipped",
|
||||
"url": self.__link,
|
||||
"reason": "Track already exists",
|
||||
"convert_to": self.__convert_to
|
||||
"reason": f"Track already exists in desired format at {existing_file_path}",
|
||||
"convert_to": self.__preferences.convert_to, # Reflect user's conversion preference
|
||||
"bitrate": self.__preferences.bitrate # Reflect user's bitrate preference
|
||||
}
|
||||
|
||||
# Add parent info based on parent type
|
||||
if self.__parent == "playlist" and hasattr(self.__preferences, "json_data"):
|
||||
playlist_data = self.__preferences.json_data
|
||||
playlist_name = playlist_data.get('name', 'unknown')
|
||||
total_tracks = playlist_data.get('tracks', {}).get('total', 'unknown')
|
||||
current_track = getattr(self.__preferences, 'track_number', 0)
|
||||
|
||||
current_track_num = getattr(self.__preferences, 'track_number', 0)
|
||||
progress_data.update({
|
||||
"current_track": current_track,
|
||||
"current_track": current_track_num,
|
||||
"total_tracks": total_tracks,
|
||||
"parent": {
|
||||
"type": "playlist",
|
||||
@@ -443,28 +362,24 @@ class EASY_DW:
|
||||
}
|
||||
})
|
||||
elif self.__parent == "album":
|
||||
album_name = self.__song_metadata.get('album', '')
|
||||
album_artist = self.__song_metadata.get('album_artist', self.__song_metadata.get('ar_album', ''))
|
||||
total_tracks = self.__song_metadata.get('nb_tracks', 0)
|
||||
current_track = getattr(self.__preferences, 'track_number', 0)
|
||||
|
||||
album_name_meta = self.__song_metadata.get('album', '')
|
||||
album_artist_meta = self.__song_metadata.get('album_artist', self.__song_metadata.get('ar_album', ''))
|
||||
total_tracks_meta = self.__song_metadata.get('nb_tracks', 0)
|
||||
current_track_num = getattr(self.__preferences, 'track_number', 0)
|
||||
progress_data.update({
|
||||
"current_track": current_track,
|
||||
"total_tracks": total_tracks,
|
||||
"current_track": current_track_num,
|
||||
"total_tracks": total_tracks_meta,
|
||||
"parent": {
|
||||
"type": "album",
|
||||
"title": album_name,
|
||||
"artist": album_artist
|
||||
"title": album_name_meta,
|
||||
"artist": album_artist_meta
|
||||
}
|
||||
})
|
||||
|
||||
Download_JOB.report_progress(progress_data)
|
||||
|
||||
# Mark track as intentionally skipped
|
||||
self.__c_track.success = False
|
||||
self.__c_track.was_skipped = True
|
||||
return self.__c_track
|
||||
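# Editor's note (not part of the diff): the "skipped" report built above is expected to
# look roughly like this for a playlist item (field values are illustrative):
#
#   {"type": "track", "song": "Song", "artist": "Artist", "status": "skipped",
#    "url": "...", "reason": "Track already exists in desired format at ...",
#    "convert_to": "MP3", "bitrate": "320k",
#    "current_track": 7, "total_tracks": 50,
#    "parent": {"type": "playlist", ...}}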
|
||||
# If track does not exist in the desired final format, proceed with download/conversion
|
||||
retries = 0
|
||||
# Use the customizable retry parameters
|
||||
retry_delay = getattr(self.__preferences, 'initial_retry_delay', 30) # Default to 30 seconds
|
||||
@@ -526,7 +441,8 @@ class EASY_DW:
|
||||
"url": self.__link,
|
||||
"time_elapsed": int((current_time - start_time) * 1000),
|
||||
"progress": current_percentage,
|
||||
"convert_to": self.__convert_to
|
||||
"convert_to": self.__convert_to,
|
||||
"bitrate": self.__bitrate
|
||||
}
|
||||
|
||||
# Add parent info based on parent type
|
||||
@@ -632,7 +548,8 @@ class EASY_DW:
|
||||
"album": self.__song_metadata.get('album', ''),
|
||||
"error": str(e),
|
||||
"url": self.__link,
|
||||
"convert_to": self.__convert_to
|
||||
"convert_to": self.__convert_to,
|
||||
"bitrate": self.__bitrate
|
||||
}
|
||||
|
||||
# Add parent info based on parent type
|
||||
@@ -721,7 +638,8 @@ class EASY_DW:
|
||||
"artist": self.__song_metadata.get('artist', ''),
|
||||
"error": error_msg,
|
||||
"url": self.__link,
|
||||
"convert_to": self.__convert_to
|
||||
"convert_to": self.__convert_to,
|
||||
"bitrate": self.__bitrate
|
||||
}
|
||||
|
||||
# Add parent info based on parent type
|
||||
@@ -794,7 +712,6 @@ class EASY_DW:
|
||||
|
||||
if hasattr(self, '_EASY_DW__c_track') and self.__c_track:
|
||||
self.__c_track.success = True
|
||||
self.__write_track()
|
||||
write_tags(self.__c_track)
|
||||
|
||||
# Create done status report using the same format as progress status
|
||||
@@ -804,7 +721,8 @@ class EASY_DW:
|
||||
"artist": self.__song_metadata.get("artist", ""),
|
||||
"status": "done",
|
||||
"url": self.__link,
|
||||
"convert_to": self.__convert_to
|
||||
"convert_to": self.__convert_to,
|
||||
"bitrate": self.__bitrate
|
||||
}
|
||||
|
||||
# Add parent info based on parent type
|
||||
@@ -844,6 +762,12 @@ class EASY_DW:
|
||||
})
|
||||
|
||||
Download_JOB.report_progress(progress_data)
|
||||
|
||||
if hasattr(self, '_EASY_DW__c_track') and self.__c_track and self.__c_track.success:
|
||||
# Unregister the final successful file path after all operations are done.
|
||||
# self.__c_track.song_path would have been updated by __convert_audio__ if conversion occurred.
|
||||
unregister_active_download(self.__c_track.song_path)
|
||||
|
||||
return self.__c_track
|
||||
|
||||
def download_eps(self) -> Episode:
|
||||
@@ -853,11 +777,18 @@ class EASY_DW:
|
||||
max_retries = getattr(self.__preferences, 'max_retries', 5) # Default to 5 retries
|
||||
|
||||
retries = 0
|
||||
# Initialize success to False for the episode, to be set True on completion
|
||||
if hasattr(self, '_EASY_DW__c_episode') and self.__c_episode:
|
||||
self.__c_episode.success = False
|
||||
|
||||
if isfile(self.__song_path) and check_track(self.__c_episode):
|
||||
ans = input(
|
||||
f"Episode \"{self.__song_path}\" already exists, do you want to redownload it?(y or n):"
|
||||
)
|
||||
if not ans in answers:
|
||||
# The user declined to redownload an existing file, so simply return it.
|
||||
# Nothing needs cleanup: the file was never registered in ACTIVE_DOWNLOADS during this
|
||||
# run, and .success is left untouched because no download processing happened here.
|
||||
return self.__c_episode
|
||||
episode_id = EpisodeId.from_base62(self.__ids)
|
||||
while True:
|
||||
@@ -868,177 +799,178 @@ class EASY_DW:
|
||||
False,
|
||||
None
|
||||
)
|
||||
# If load_episode is successful, break from retry loop
|
||||
break
|
||||
except Exception as e:
|
||||
global GLOBAL_RETRY_COUNT
|
||||
GLOBAL_RETRY_COUNT += 1
|
||||
retries += 1
|
||||
# Log retry attempt with structured data
|
||||
print(json.dumps({
|
||||
"status": "retrying",
|
||||
"retry_count": retries,
|
||||
"seconds_left": retry_delay,
|
||||
"song": self.__song_metadata['music'],
|
||||
"artist": self.__song_metadata['artist'],
|
||||
"album": self.__song_metadata['album'],
|
||||
"song": self.__song_metadata.get('music', 'Unknown Episode'),
|
||||
"artist": self.__song_metadata.get('artist', 'Unknown Show'),
|
||||
"album": self.__song_metadata.get('album', 'N/A'), # Episodes don't typically have albums
|
||||
"error": str(e),
|
||||
"convert_to": self.__convert_to
|
||||
"convert_to": self.__convert_to,
|
||||
"bitrate": self.__bitrate
|
||||
}))
|
||||
if retries >= max_retries or GLOBAL_RETRY_COUNT >= GLOBAL_MAX_RETRIES:
|
||||
# Clean up any partial files before giving up
|
||||
if os.path.exists(self.__song_path):
|
||||
os.remove(self.__song_path)
|
||||
# Add track info to exception
|
||||
track_name = self.__song_metadata.get('music', 'Unknown Track')
|
||||
artist_name = self.__song_metadata.get('artist', 'Unknown Artist')
|
||||
os.remove(self.__song_path) # Clean up partial file
|
||||
unregister_active_download(self.__song_path) # Unregister it
|
||||
track_name = self.__song_metadata.get('music', 'Unknown Episode')
|
||||
artist_name = self.__song_metadata.get('artist', 'Unknown Show')
|
||||
final_error_msg = f"Maximum retry limit reached for '{track_name}' by '{artist_name}' (local: {max_retries}, global: {GLOBAL_MAX_RETRIES}). Last error: {str(e)}"
|
||||
# Store error on track object
|
||||
if hasattr(self, '_EASY_DW__c_episode') and self.__c_episode:
|
||||
self.__c_episode.success = False
|
||||
self.__c_episode.error_message = final_error_msg
|
||||
raise Exception(final_error_msg) from e
|
||||
time.sleep(retry_delay)
|
||||
retry_delay += retry_delay_increase # Use the custom retry delay increase
|
||||
retry_delay += retry_delay_increase
|
||||
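# Editor's note (not part of the diff): the retry loop above uses a linear backoff.
# With the defaults used in this module (30s initial delay, +30s per retry) the waits
# are 30s, 60s, 90s, ... until either max_retries or GLOBAL_MAX_RETRIES is reached,
# at which point the partial file is removed, unregistered, and the last error re-raised.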
|
||||
total_size = stream.input_stream.size
|
||||
os.makedirs(dirname(self.__song_path), exist_ok=True)
|
||||
|
||||
# Register this file as being actively downloaded
|
||||
register_active_download(self.__song_path)
|
||||
register_active_download(self.__song_path) # Register before writing
|
||||
|
||||
try:
|
||||
with open(self.__song_path, "wb") as f:
|
||||
c_stream = stream.input_stream.stream()
|
||||
if self.__real_time_dl and self.__song_metadata.get("duration"):
|
||||
if self.__real_time_dl and self.__song_metadata.get("duration") and self.__song_metadata["duration"] > 0:
|
||||
# Restored Real-time download logic for episodes
|
||||
duration = self.__song_metadata["duration"]
|
||||
if duration > 0:
|
||||
rate_limit = total_size / duration
|
||||
chunk_size = 4096
|
||||
bytes_written = 0
|
||||
start_time = time.time()
|
||||
try:
|
||||
while True:
|
||||
chunk = c_stream.read(chunk_size)
|
||||
if not chunk:
|
||||
break
|
||||
f.write(chunk)
|
||||
bytes_written += len(chunk)
|
||||
# Could add progress reporting here
|
||||
expected_time = bytes_written / rate_limit
|
||||
elapsed_time = time.time() - start_time
|
||||
if expected_time > elapsed_time:
|
||||
time.sleep(expected_time - elapsed_time)
|
||||
except Exception as e:
|
||||
# If any error occurs during real-time download, delete the incomplete file
|
||||
logger.error(f"Error during real-time download: {str(e)}")
|
||||
try:
|
||||
c_stream.close()
|
||||
except:
|
||||
pass
|
||||
try:
|
||||
f.close()
|
||||
except:
|
||||
pass
|
||||
if os.path.exists(self.__song_path):
|
||||
os.remove(self.__song_path)
|
||||
# Add track info to exception
|
||||
track_name = self.__song_metadata.get('music', 'Unknown Track')
|
||||
artist_name = self.__song_metadata.get('artist', 'Unknown Artist')
|
||||
final_error_msg = f"Error during real-time download for '{track_name}' by '{artist_name}' (URL: {self.__link}). Error: {str(e)}"
|
||||
# Store error on track object
|
||||
if hasattr(self, '_EASY_DW__c_episode') and self.__c_episode:
|
||||
self.__c_episode.success = False
|
||||
self.__c_episode.error_message = final_error_msg
|
||||
raise TrackNotFound(message=final_error_msg, url=self.__link) from e
|
||||
else:
|
||||
try:
|
||||
data = c_stream.read(total_size)
|
||||
f.write(data)
|
||||
except Exception as e:
|
||||
logger.error(f"Error during episode download: {str(e)}")
|
||||
try:
|
||||
c_stream.close()
|
||||
except:
|
||||
pass
|
||||
if os.path.exists(self.__song_path):
|
||||
os.remove(self.__song_path)
|
||||
# Add track info to exception
|
||||
track_name = self.__song_metadata.get('music', 'Unknown Track')
|
||||
artist_name = self.__song_metadata.get('artist', 'Unknown Artist')
|
||||
final_error_msg = f"Error during episode download for '{track_name}' by '{artist_name}' (URL: {self.__link}). Error: {str(e)}"
|
||||
# Store error on track object
|
||||
if hasattr(self, '_EASY_DW__c_episode') and self.__c_episode:
|
||||
self.__c_episode.success = False
|
||||
self.__c_episode.error_message = final_error_msg
|
||||
raise TrackNotFound(message=final_error_msg, url=self.__link) from e
|
||||
else:
|
||||
rate_limit = total_size / duration
|
||||
chunk_size = 4096
|
||||
bytes_written = 0
|
||||
start_time = time.time()
|
||||
try:
|
||||
data = c_stream.read(total_size)
|
||||
f.write(data)
|
||||
except Exception as e:
|
||||
logger.error(f"Error during episode download: {str(e)}")
|
||||
try:
|
||||
c_stream.close()
|
||||
except:
|
||||
pass
|
||||
while True:
|
||||
chunk = c_stream.read(chunk_size)
|
||||
if not chunk:
|
||||
break
|
||||
f.write(chunk)
|
||||
bytes_written += len(chunk)
|
||||
# Optional: Real-time progress reporting for episodes (can be added here if desired)
|
||||
# Matching the style of download_try, no specific progress report inside this loop for episodes by default.
|
||||
expected_time = bytes_written / rate_limit
|
||||
elapsed_time = time.time() - start_time
|
||||
if expected_time > elapsed_time:
|
||||
time.sleep(expected_time - elapsed_time)
|
||||
except Exception as e_realtime:
|
||||
# If any error occurs during real-time download, clean up
|
||||
if not c_stream.closed:
|
||||
try:
|
||||
c_stream.close()
|
||||
except:
|
||||
pass
|
||||
# f.close() is handled by with statement, but an explicit one might be here if not using with.
|
||||
if os.path.exists(self.__song_path):
|
||||
os.remove(self.__song_path)
|
||||
# Add track info to exception
|
||||
track_name = self.__song_metadata.get('music', 'Unknown Track')
|
||||
artist_name = self.__song_metadata.get('artist', 'Unknown Artist')
|
||||
final_error_msg = f"Error during episode download for '{track_name}' by '{artist_name}' (URL: {self.__link}). Error: {str(e)}"
|
||||
# Store error on track object
|
||||
try:
|
||||
os.remove(self.__song_path)
|
||||
except:
|
||||
pass
|
||||
unregister_active_download(self.__song_path)
|
||||
episode_title = self.__song_metadata.get('music', 'Unknown Episode')
|
||||
artist_name = self.__song_metadata.get('artist', 'Unknown Show')
|
||||
final_error_msg = f"Error during real-time download for episode '{episode_title}' by '{artist_name}' (URL: {self.__link}). Error: {str(e_realtime)}"
|
||||
logger.error(final_error_msg)
|
||||
if hasattr(self, '_EASY_DW__c_episode') and self.__c_episode:
|
||||
self.__c_episode.success = False
|
||||
self.__c_episode.error_message = final_error_msg
|
||||
raise TrackNotFound(message=final_error_msg, url=self.__link) from e
|
||||
c_stream.close()
|
||||
except Exception as e:
|
||||
# Clean up the file on any error
|
||||
raise TrackNotFound(message=final_error_msg, url=self.__link) from e_realtime
|
||||
else:
|
||||
# Restored Non real-time download logic for episodes
|
||||
try:
|
||||
data = c_stream.read(total_size)
|
||||
f.write(data)
|
||||
except Exception as e_standard:
|
||||
# If any error occurs during standard download, clean up
|
||||
if not c_stream.closed:
|
||||
try:
|
||||
c_stream.close()
|
||||
except:
|
||||
pass
|
||||
if os.path.exists(self.__song_path):
|
||||
try:
|
||||
os.remove(self.__song_path)
|
||||
except:
|
||||
pass
|
||||
unregister_active_download(self.__song_path)
|
||||
episode_title = self.__song_metadata.get('music', 'Unknown Episode')
|
||||
artist_name = self.__song_metadata.get('artist', 'Unknown Show')
|
||||
final_error_msg = f"Error during standard download for episode '{episode_title}' by '{artist_name}' (URL: {self.__link}). Error: {str(e_standard)}"
|
||||
logger.error(final_error_msg)
|
||||
if hasattr(self, '_EASY_DW__c_episode') and self.__c_episode:
|
||||
self.__c_episode.success = False
|
||||
self.__c_episode.error_message = final_error_msg
|
||||
raise TrackNotFound(message=final_error_msg, url=self.__link) from e_standard
|
||||
|
||||
# If all went well with writing to file and reading stream:
|
||||
if not c_stream.closed: c_stream.close()
|
||||
|
||||
# If with open completes without internal exceptions leading to TrackNotFound:
|
||||
unregister_active_download(self.__song_path) # Unregister after successful write of original file
|
||||
|
||||
except TrackNotFound: # Re-raise if it was an internally handled download error
|
||||
raise
|
||||
except Exception as e_outer: # Catch other potential errors around file handling or unexpected issues
|
||||
# Cleanup for download part if an unexpected error occurs outside the inner try-excepts
|
||||
if 'c_stream' in locals() and hasattr(c_stream, 'closed') and not c_stream.closed:
|
||||
try: c_stream.close()
|
||||
except: pass
|
||||
if os.path.exists(self.__song_path):
|
||||
os.remove(self.__song_path)
|
||||
try: os.remove(self.__song_path)
|
||||
except: pass
|
||||
unregister_active_download(self.__song_path)
|
||||
episode_title = self.__song_metadata.get('music', 'Unknown Episode')
|
||||
error_message = f"Failed to download episode '{episode_title}' (URL: {self.__link}). Error: {str(e)}"
|
||||
error_message = f"Failed to download episode '{episode_title}' (URL: {self.__link}) during file operations. Error: {str(e_outer)}"
|
||||
logger.error(error_message)
|
||||
# Store error on episode object
|
||||
if hasattr(self, '_EASY_DW__c_episode') and self.__c_episode:
|
||||
self.__c_episode.success = False
|
||||
self.__c_episode.error_message = error_message
|
||||
raise TrackNotFound(message=error_message, url=self.__link) from e
|
||||
raise TrackNotFound(message=error_message, url=self.__link) from e_outer
|
||||
|
||||
# If download was successful, proceed to conversion and tagging
|
||||
try:
|
||||
self.__convert_audio()
|
||||
except Exception as e:
|
||||
self.__convert_audio() # This will update self.__c_episode.file_format and path if conversion occurs
|
||||
# It also handles registration/unregistration of intermediate/final files during conversion.
|
||||
except Exception as conv_e:
|
||||
# Conversion failed. __convert_audio or underlying convert_audio should have cleaned up its own temps.
|
||||
# The original downloaded file (if __convert_audio started from it) might still exist or be the self.__song_path.
|
||||
# Or self.__song_path might be a partially converted file if convert_audio failed mid-way and didn't clean up completely.
|
||||
logger.error(json.dumps({
|
||||
"status": "retrying",
|
||||
"status": "error",
|
||||
"action": "convert_audio",
|
||||
"song": self.__song_metadata['music'],
|
||||
"artist": self.__song_metadata['artist'],
|
||||
"album": self.__song_metadata['album'],
|
||||
"error": str(e),
|
||||
"convert_to": self.__convert_to
|
||||
"song": self.__song_metadata.get('music', 'Unknown Episode'),
|
||||
"artist": self.__song_metadata.get('artist', 'Unknown Show'),
|
||||
"album": self.__song_metadata.get('album', 'N/A'),
|
||||
"error": str(conv_e),
|
||||
"convert_to": self.__convert_to,
|
||||
"bitrate": self.__bitrate
|
||||
}))
|
||||
# Clean up if conversion fails
|
||||
# Attempt to remove self.__song_path, which is the latest known path for this episode
|
||||
if os.path.exists(self.__song_path):
|
||||
os.remove(self.__song_path)
|
||||
unregister_active_download(self.__song_path) # Unregister it as it failed/was removed
|
||||
|
||||
time.sleep(retry_delay)
|
||||
retry_delay += retry_delay_increase # Use the custom retry delay increase
|
||||
try:
|
||||
self.__convert_audio()
|
||||
except Exception as conv_e:
|
||||
# If conversion fails twice, clean up and raise
|
||||
if os.path.exists(self.__song_path):
|
||||
os.remove(self.__song_path)
|
||||
episode_title = self.__song_metadata.get('music', 'Unknown Episode')
|
||||
error_message = f"Audio conversion for episode '{episode_title}' failed after retry. Original error: {str(conv_e)}"
|
||||
logger.error(error_message)
|
||||
# Store error on episode object
|
||||
episode_title = self.__song_metadata.get('music', 'Unknown Episode')
|
||||
error_message = f"Audio conversion for episode '{episode_title}' failed. Original error: {str(conv_e)}"
|
||||
logger.error(error_message)
|
||||
if hasattr(self, '_EASY_DW__c_episode') and self.__c_episode:
|
||||
self.__c_episode.success = False
|
||||
self.__c_episode.error_message = error_message
|
||||
raise TrackNotFound(message=error_message, url=self.__link) from conv_e
|
||||
raise TrackNotFound(message=error_message, url=self.__link) from conv_e
|
||||
|
||||
# Write metadata tags so subsequent skips work
|
||||
write_tags(self.__c_episode)
|
||||
# If we reach here, download and any conversion were successful.
|
||||
if hasattr(self, '_EASY_DW__c_episode') and self.__c_episode:
|
||||
self.__c_episode.success = True
|
||||
write_tags(self.__c_episode)
|
||||
# Unregister the final successful file path for episodes, as it's now complete.
|
||||
# self.__c_episode.episode_path would have been updated by __convert_audio__ if conversion occurred.
|
||||
unregister_active_download(self.__c_episode.episode_path)
|
||||
|
||||
return self.__c_episode
|
||||
|
||||
|
||||
@@ -97,6 +97,7 @@ class SpoLogin:
|
||||
retry_delay_increase=30,
|
||||
max_retries=5,
|
||||
convert_to=None,
|
||||
bitrate=None,
|
||||
save_cover=stock_save_cover
|
||||
) -> Track:
|
||||
try:
|
||||
@@ -123,7 +124,12 @@ class SpoLogin:
|
||||
preferences.initial_retry_delay = initial_retry_delay
|
||||
preferences.retry_delay_increase = retry_delay_increase
|
||||
preferences.max_retries = max_retries
|
||||
preferences.convert_to = convert_to
|
||||
if convert_to is None:
|
||||
preferences.convert_to = None
|
||||
preferences.bitrate = None
|
||||
else:
|
||||
preferences.convert_to = convert_to
|
||||
preferences.bitrate = bitrate
|
||||
preferences.save_cover = save_cover
|
||||
|
||||
track = DW_TRACK(preferences).dw()
|
||||
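# Editor's sketch (not part of the diff): the convert_to/bitrate coupling just above is
# repeated in every download_* method; a small hypothetical helper could centralise it.
# The helper name is an assumption, not part of the library.
#
#   def _apply_conversion_prefs(preferences, convert_to, bitrate):
#       # A bitrate only makes sense when a target format is requested.
#       preferences.convert_to = convert_to
#       preferences.bitrate = bitrate if convert_to is not None else None
#
#   _apply_conversion_prefs(preferences, convert_to, bitrate)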
@@ -150,6 +156,7 @@ class SpoLogin:
|
||||
retry_delay_increase=30,
|
||||
max_retries=5,
|
||||
convert_to=None,
|
||||
bitrate=None,
|
||||
save_cover=stock_save_cover
|
||||
) -> Album:
|
||||
try:
|
||||
@@ -180,7 +187,12 @@ class SpoLogin:
|
||||
preferences.initial_retry_delay = initial_retry_delay
|
||||
preferences.retry_delay_increase = retry_delay_increase
|
||||
preferences.max_retries = max_retries
|
||||
preferences.convert_to = convert_to
|
||||
if convert_to is None:
|
||||
preferences.convert_to = None
|
||||
preferences.bitrate = None
|
||||
else:
|
||||
preferences.convert_to = convert_to
|
||||
preferences.bitrate = bitrate
|
||||
preferences.save_cover = save_cover
|
||||
|
||||
album = DW_ALBUM(preferences).dw()
|
||||
@@ -207,6 +219,7 @@ class SpoLogin:
|
||||
retry_delay_increase=30,
|
||||
max_retries=5,
|
||||
convert_to=None,
|
||||
bitrate=None,
|
||||
save_cover=stock_save_cover
|
||||
) -> Playlist:
|
||||
try:
|
||||
@@ -251,7 +264,12 @@ class SpoLogin:
|
||||
preferences.initial_retry_delay = initial_retry_delay
|
||||
preferences.retry_delay_increase = retry_delay_increase
|
||||
preferences.max_retries = max_retries
|
||||
preferences.convert_to = convert_to
|
||||
if convert_to is None:
|
||||
preferences.convert_to = None
|
||||
preferences.bitrate = None
|
||||
else:
|
||||
preferences.convert_to = convert_to
|
||||
preferences.bitrate = bitrate
|
||||
preferences.save_cover = save_cover
|
||||
|
||||
playlist = DW_PLAYLIST(preferences).dw()
|
||||
@@ -277,6 +295,7 @@ class SpoLogin:
|
||||
retry_delay_increase=30,
|
||||
max_retries=5,
|
||||
convert_to=None,
|
||||
bitrate=None,
|
||||
save_cover=stock_save_cover
|
||||
) -> Episode:
|
||||
try:
|
||||
@@ -305,7 +324,12 @@ class SpoLogin:
|
||||
preferences.initial_retry_delay = initial_retry_delay
|
||||
preferences.retry_delay_increase = retry_delay_increase
|
||||
preferences.max_retries = max_retries
|
||||
preferences.convert_to = convert_to
|
||||
if convert_to is None:
|
||||
preferences.convert_to = None
|
||||
preferences.bitrate = None
|
||||
else:
|
||||
preferences.convert_to = convert_to
|
||||
preferences.bitrate = bitrate
|
||||
preferences.save_cover = save_cover
|
||||
|
||||
episode = DW_EPISODE(preferences).dw()
|
||||
@@ -333,7 +357,8 @@ class SpoLogin:
|
||||
initial_retry_delay=30,
|
||||
retry_delay_increase=30,
|
||||
max_retries=5,
|
||||
convert_to=None
|
||||
convert_to=None,
|
||||
bitrate=None
|
||||
):
|
||||
"""
|
||||
Download all albums (or a subset based on album_type and limit) from an artist.
|
||||
@@ -370,7 +395,8 @@ class SpoLogin:
|
||||
initial_retry_delay=initial_retry_delay,
|
||||
retry_delay_increase=retry_delay_increase,
|
||||
max_retries=max_retries,
|
||||
convert_to=convert_to
|
||||
convert_to=convert_to,
|
||||
bitrate=bitrate
|
||||
)
|
||||
downloaded_albums.append(downloaded_album)
|
||||
return downloaded_albums
|
||||
@@ -395,6 +421,7 @@ class SpoLogin:
|
||||
retry_delay_increase=30,
|
||||
max_retries=5,
|
||||
convert_to=None,
|
||||
bitrate=None,
|
||||
save_cover=stock_save_cover
|
||||
) -> Smart:
|
||||
try:
|
||||
@@ -425,6 +452,8 @@ class SpoLogin:
|
||||
initial_retry_delay=initial_retry_delay,
|
||||
retry_delay_increase=retry_delay_increase,
|
||||
max_retries=max_retries,
|
||||
convert_to=convert_to,
|
||||
bitrate=bitrate,
|
||||
save_cover=save_cover
|
||||
)
|
||||
smart.type = "track"
|
||||
@@ -449,6 +478,7 @@ class SpoLogin:
|
||||
retry_delay_increase=retry_delay_increase,
|
||||
max_retries=max_retries,
|
||||
convert_to=convert_to,
|
||||
bitrate=bitrate,
|
||||
save_cover=save_cover
|
||||
)
|
||||
smart.type = "album"
|
||||
@@ -473,6 +503,7 @@ class SpoLogin:
|
||||
retry_delay_increase=retry_delay_increase,
|
||||
max_retries=max_retries,
|
||||
convert_to=convert_to,
|
||||
bitrate=bitrate,
|
||||
save_cover=save_cover
|
||||
)
|
||||
smart.type = "playlist"
|
||||
@@ -496,6 +527,7 @@ class SpoLogin:
|
||||
retry_delay_increase=retry_delay_increase,
|
||||
max_retries=max_retries,
|
||||
convert_to=convert_to,
|
||||
bitrate=bitrate,
|
||||
save_cover=save_cover
|
||||
)
|
||||
smart.type = "episode"
|
||||
|
||||
@@ -6,130 +6,237 @@ from deezspot.libutils.utils import convert_to_date
|
||||
import traceback
|
||||
from deezspot.libutils.logging_utils import logger
|
||||
|
||||
def tracking(ids, album=None):
|
||||
def _get_best_image_urls(images_list):
|
||||
urls = {'image': '', 'image2': '', 'image3': ''}
|
||||
if not images_list or not isinstance(images_list, list):
|
||||
return urls
|
||||
|
||||
# Sort images by area (height * width) in descending order
|
||||
# Handle cases where height or width might be missing
|
||||
sorted_images = sorted(
|
||||
images_list,
|
||||
key=lambda img: img.get('height', 0) * img.get('width', 0),
|
||||
reverse=True
|
||||
)
|
||||
|
||||
if len(sorted_images) > 0:
|
||||
urls['image'] = sorted_images[0].get('url', '')
|
||||
if len(sorted_images) > 1:
|
||||
urls['image2'] = sorted_images[1].get('url', '') # Second largest or same if only one size
|
||||
if len(sorted_images) > 2:
|
||||
urls['image3'] = sorted_images[2].get('url', '') # Third largest
|
||||
|
||||
return urls
|
||||
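# Editor's sketch (not part of the diff): with the area-based sort above, a typical
# Spotify-style images list resolves largest-first (values are illustrative):
#
#   images = [{"url": "small", "height": 64, "width": 64},
#             {"url": "large", "height": 640, "width": 640},
#             {"url": "medium", "height": 300, "width": 300}]
#   _get_best_image_urls(images)
#   # -> {"image": "large", "image2": "medium", "image3": "small"}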
|
||||
def tracking(ids, album_data_for_track=None):
|
||||
datas = {}
|
||||
try:
|
||||
json_track = Spo.get_track(ids)
|
||||
if not json_track:
|
||||
logger.error(f"Failed to get track details for ID: {ids} from Spotify API.")
|
||||
return None
|
||||
|
||||
if not album:
|
||||
album_ids = json_track['album']['id']
|
||||
json_album = Spo.get_album(album_ids)
|
||||
datas['image'] = json_album['images'][0]['url']
|
||||
datas['image2'] = json_album['images'][1]['url']
|
||||
datas['image3'] = json_album['images'][2]['url']
|
||||
datas['genre'] = "; ".join(json_album['genres'])
|
||||
# Album details section
|
||||
# Use provided album_data_for_track if available (from tracking_album context)
|
||||
# Otherwise, fetch from track's album info or make a new API call for more details
|
||||
album_to_process = None
|
||||
fetch_full_album_details = False
|
||||
|
||||
ar_album = [
|
||||
artist['name']
|
||||
for artist in json_album['artists']
|
||||
]
|
||||
if album_data_for_track:
|
||||
album_to_process = album_data_for_track
|
||||
elif json_track.get('album'):
|
||||
album_to_process = json_track.get('album')
|
||||
# We might want fuller album details (like label, genres, upc, copyrights)
|
||||
# not present in track's nested album object.
|
||||
fetch_full_album_details = True
|
||||
|
||||
datas['ar_album'] = "; ".join(ar_album)
|
||||
datas['album'] = json_album['name']
|
||||
datas['label'] = json_album['label']
|
||||
if fetch_full_album_details and album_to_process and album_to_process.get('id'):
|
||||
full_album_json = Spo.get_album(album_to_process.get('id'))
|
||||
if full_album_json:
|
||||
album_to_process = full_album_json # Prioritize full album details
|
||||
|
||||
external_ids = json_album.get('external_ids', {})
|
||||
datas['upc'] = external_ids.get('upc', "Unknown")
|
||||
if album_to_process:
|
||||
image_urls = _get_best_image_urls(album_to_process.get('images', []))
|
||||
datas.update(image_urls)
|
||||
|
||||
datas['nb_tracks'] = json_album['total_tracks']
|
||||
datas['genre'] = "; ".join(album_to_process.get('genres', []))
|
||||
|
||||
datas['music'] = json_track['name']
|
||||
album_artists_data = album_to_process.get('artists', [])
|
||||
ar_album_names = [artist.get('name', '') for artist in album_artists_data if artist.get('name')]
|
||||
datas['ar_album'] = "; ".join(filter(None, ar_album_names)) or 'Unknown Artist'
|
||||
|
||||
artists = [
|
||||
artist['name']
|
||||
for artist in json_track['artists']
|
||||
]
|
||||
datas['album'] = album_to_process.get('name', 'Unknown Album')
|
||||
datas['label'] = album_to_process.get('label', '') # Often in full album, not track's album obj
|
||||
datas['album_type'] = album_to_process.get('album_type', 'unknown')
|
||||
|
||||
datas['artist'] = "; ".join(artists)
|
||||
datas['tracknum'] = json_track['track_number']
|
||||
datas['discnum'] = json_track['disc_number']
|
||||
copyrights_data = album_to_process.get('copyrights', [])
|
||||
datas['copyright'] = copyrights_data[0].get('text', '') if copyrights_data else ''
|
||||
|
||||
datas['year'] = convert_to_date(
|
||||
json_track['album']['release_date']
|
||||
)
|
||||
album_external_ids = album_to_process.get('external_ids', {})
|
||||
datas['upc'] = album_external_ids.get('upc', '')
|
||||
|
||||
datas['bpm'] = "Unknown"
|
||||
datas['duration'] = json_track['duration_ms'] // 1000
|
||||
datas['nb_tracks'] = album_to_process.get('total_tracks', 0)
|
||||
# Release date from album_to_process is likely more definitive
|
||||
datas['year'] = convert_to_date(album_to_process.get('release_date', ''))
|
||||
datas['release_date_precision'] = album_to_process.get('release_date_precision', 'unknown')
|
||||
        else:  # Fallback if no album_to_process
            datas.update(_get_best_image_urls([]))
            datas['genre'] = ''
            datas['ar_album'] = 'Unknown Artist'
            datas['album'] = json_track.get('album', {}).get('name', 'Unknown Album')  # Basic fallback
            datas['label'] = ''
            datas['album_type'] = json_track.get('album', {}).get('album_type', 'unknown')
            datas['copyright'] = ''
            datas['upc'] = ''
            datas['nb_tracks'] = json_track.get('album', {}).get('total_tracks', 0)
            datas['year'] = convert_to_date(json_track.get('album', {}).get('release_date', ''))
            datas['release_date_precision'] = json_track.get('album', {}).get('release_date_precision', 'unknown')

        # Track specific details
        datas['music'] = json_track.get('name', 'Unknown Track')

        track_artists_data = json_track.get('artists', [])
        track_artist_names = [artist.get('name', '') for artist in track_artists_data if artist.get('name')]
        datas['artist'] = "; ".join(filter(None, track_artist_names)) or 'Unknown Artist'

        datas['tracknum'] = json_track.get('track_number', 0)
        datas['discnum'] = json_track.get('disc_number', 0)

        # If year details were not set from a more complete album object, use track's album info
        if not datas.get('year') and json_track.get('album'):
            datas['year'] = convert_to_date(json_track.get('album', {}).get('release_date', ''))
            datas['release_date_precision'] = json_track.get('album', {}).get('release_date_precision', 'unknown')

        datas['duration'] = json_track.get('duration_ms', 0) // 1000

        track_external_ids = json_track.get('external_ids', {})
        datas['isrc'] = track_external_ids.get('isrc', '')

        datas['explicit'] = json_track.get('explicit', False)
        datas['popularity'] = json_track.get('popularity', 0)

        # Placeholder for tags not directly from this API response but might be expected by tagger
        datas['bpm'] = datas.get('bpm', 'Unknown')  # Not available here
        datas['gain'] = datas.get('gain', 'Unknown')  # Not available here
        datas['lyric'] = datas.get('lyric', '')  # Not available here
        datas['author'] = datas.get('author', '')  # Not available here (lyricist)
        datas['composer'] = datas.get('composer', '')  # Not available here
        # copyright is handled by album section
        datas['lyricist'] = datas.get('lyricist', '')  # Same as author, not here
        datas['version'] = datas.get('version', '')  # Not typically here

        datas['ids'] = ids

        logger.debug(f"Successfully tracked metadata for track {ids}")

    except Exception as e:
        logger.error(f"Failed to track metadata for track {ids}: {str(e)}")
        logger.debug(traceback.format_exc())
        return None

    return datas
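

# Illustrative usage sketch (not part of the original module): tracking() returns a
# flat dict of tag fields, so a caller can build simple "Artist - Title" labels from
# it. The track IDs passed in are assumed to be valid Spotify track IDs, Spo is
# assumed to be already authenticated, and the helper name below is hypothetical.
def _example_track_labels(track_ids):
    labels = []
    for tid in track_ids:
        tags = tracking(tid)
        if tags:
            # 'artist' and 'music' are filled in whenever tracking() succeeds
            labels.append(f"{tags.get('artist', 'Unknown Artist')} - {tags.get('music', 'Unknown Track')}")
    return labels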


def tracking_album(album_json):
    if not album_json:
        logger.error("tracking_album received None or empty album_json.")
        return None

    song_metadata = {}
    try:
        initial_list_fields = {
            "music": [], "artist": [], "tracknum": [], "discnum": [],
            "duration": [], "isrc": [], "ids": [], "explicit_list": [], "popularity_list": []
            # "bpm": [], "gain": [] are usually unknown from this endpoint for tracks
        }
        song_metadata.update(initial_list_fields)

        image_urls = _get_best_image_urls(album_json.get('images', []))
        song_metadata.update(image_urls)

        song_metadata['album'] = album_json.get('name', 'Unknown Album')
        song_metadata['label'] = album_json.get('label', '')
        song_metadata['year'] = convert_to_date(album_json.get('release_date', ''))
        song_metadata['release_date_precision'] = album_json.get('release_date_precision', 'unknown')
        song_metadata['nb_tracks'] = album_json.get('total_tracks', 0)
        song_metadata['genre'] = "; ".join(album_json.get('genres', []))
        song_metadata['album_type'] = album_json.get('album_type', 'unknown')
        song_metadata['popularity'] = album_json.get('popularity', 0)

        album_artists_data = album_json.get('artists', [])
        ar_album_names = [artist.get('name', '') for artist in album_artists_data if artist.get('name')]
        song_metadata['ar_album'] = "; ".join(filter(None, ar_album_names)) or 'Unknown Artist'

        album_external_ids = album_json.get('external_ids', {})
        song_metadata['upc'] = album_external_ids.get('upc', '')

        copyrights_data = album_json.get('copyrights', [])
        song_metadata['copyright'] = copyrights_data[0].get('text', '') if copyrights_data else ''

        # Add other common flat metadata keys with defaults if not directly from album_json
        song_metadata['bpm'] = 'Unknown'
        song_metadata['gain'] = 'Unknown'
        song_metadata['lyric'] = ''
        song_metadata['author'] = ''
        song_metadata['composer'] = ''
        song_metadata['lyricist'] = ''
        song_metadata['version'] = ''

        tracks_data = album_json.get('tracks', {}).get('items', [])
        for track_item in tracks_data:
            if not track_item:
                continue  # Skip if track_item is None
            c_ids = track_item.get('id')
            if not c_ids:  # If track has no ID, try to get some basic info directly
                song_metadata['music'].append(track_item.get('name', 'Unknown Track'))
                track_artists_data = track_item.get('artists', [])
                track_artist_names = [artist.get('name', '') for artist in track_artists_data if artist.get('name')]
                song_metadata['artist'].append("; ".join(filter(None, track_artist_names)) or 'Unknown Artist')
                song_metadata['tracknum'].append(track_item.get('track_number', 0))
                song_metadata['discnum'].append(track_item.get('disc_number', 0))
                song_metadata['duration'].append(track_item.get('duration_ms', 0) // 1000)
                song_metadata['isrc'].append(track_item.get('external_ids', {}).get('isrc', ''))
                song_metadata['ids'].append('N/A')
                song_metadata['explicit_list'].append(track_item.get('explicit', False))
                song_metadata['popularity_list'].append(track_item.get('popularity', 0))
                continue

            # Pass the main album_json as album_data_for_track to avoid refetching it in tracking()
            track_details = tracking(c_ids, album_data_for_track=album_json)

            if track_details:
                song_metadata['music'].append(track_details.get('music', 'Unknown Track'))
                song_metadata['artist'].append(track_details.get('artist', 'Unknown Artist'))
                song_metadata['tracknum'].append(track_details.get('tracknum', 0))
                song_metadata['discnum'].append(track_details.get('discnum', 0))
                # BPM and Gain are generally not per-track from this endpoint
                # song_metadata['bpm'].append(track_details.get('bpm', 'Unknown'))
                song_metadata['duration'].append(track_details.get('duration', 0))
                song_metadata['isrc'].append(track_details.get('isrc', ''))
                song_metadata['ids'].append(c_ids)
                song_metadata['explicit_list'].append(track_details.get('explicit', False))
                # popularity_list for track specific popularity if needed, or use album popularity
                # song_metadata['popularity_list'].append(track_details.get('popularity', 0))
            else:  # Fallback if tracking(c_ids) failed
                logger.warning(f"Could not retrieve full metadata for track ID {c_ids} in album {album_json.get('id', 'N/A')}. Using minimal data.")
                song_metadata['music'].append(track_item.get('name', 'Unknown Track'))
                track_artists_data = track_item.get('artists', [])
                track_artist_names = [artist.get('name', '') for artist in track_artists_data if artist.get('name')]
                song_metadata['artist'].append("; ".join(filter(None, track_artist_names)) or 'Unknown Artist')
                song_metadata['tracknum'].append(track_item.get('track_number', 0))
                song_metadata['discnum'].append(track_item.get('disc_number', 0))
                song_metadata['duration'].append(track_item.get('duration_ms', 0) // 1000)
                song_metadata['isrc'].append(track_item.get('external_ids', {}).get('isrc', ''))
                song_metadata['ids'].append(c_ids)
                song_metadata['explicit_list'].append(track_item.get('explicit', False))
                # song_metadata['popularity_list'].append(track_item.get('popularity', 0))

        logger.debug(f"Successfully tracked metadata for album {album_json.get('id', 'N/A')}")

    except Exception as e:
        logger.error(f"Failed to track album metadata for album ID {album_json.get('id', 'N/A') if album_json else 'N/A'}: {str(e)}")
        logger.debug(traceback.format_exc())
        return None

    return song_metadata
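

# Illustrative sketch (not part of the original module): tracking_album() returns
# album-level fields plus parallel per-track lists ('music', 'artist', 'tracknum',
# 'duration', ...), so consumers typically zip those lists back together to get one
# record per track. The helper name below is hypothetical.
def _example_album_tracklist(album_json):
    meta = tracking_album(album_json)
    if not meta:
        return []
    return [
        f"{num:02d}. {title} - {artist}"
        for num, title, artist in zip(meta['tracknum'], meta['music'], meta['artist'])
    ]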

@@ -138,33 +245,69 @@ def tracking_episode(ids):
    datas = {}
    try:
        json_episode = Spo.get_episode(ids)
        if not json_episode:
            logger.error(f"Failed to get episode details for ID: {ids} from Spotify API.")
            return None

        image_urls = _get_best_image_urls(json_episode.get('images', []))
        datas.update(image_urls)

        datas['audio_preview_url'] = json_episode.get('audio_preview_url', '')
        datas['description'] = json_episode.get('description', '')
        datas['duration'] = json_episode.get('duration_ms', 0) // 1000
        datas['explicit'] = json_episode.get('explicit', False)
        datas['external_urls_spotify'] = json_episode.get('external_urls', {}).get('spotify', '')
        datas['href'] = json_episode.get('href', '')
        datas['html_description'] = json_episode.get('html_description', '')
        datas['id'] = json_episode.get('id', '')  # Episode's own ID

        datas['is_externally_hosted'] = json_episode.get('is_externally_hosted', False)
        datas['is_playable'] = json_episode.get('is_playable', False)
        datas['language'] = json_episode.get('language', '')  # Deprecated, use languages
        datas['languages'] = "; ".join(json_episode.get('languages', []))
        datas['music'] = json_episode.get('name', 'Unknown Episode')  # Use 'music' for consistency with track naming
        datas['name'] = json_episode.get('name', 'Unknown Episode')  # Keep 'name' as well if needed by other parts

        datas['release_date'] = convert_to_date(json_episode.get('release_date', ''))
        datas['release_date_precision'] = json_episode.get('release_date_precision', 'unknown')

        show_data = json_episode.get('show', {})
        datas['show_name'] = show_data.get('name', 'Unknown Show')
        datas['publisher'] = show_data.get('publisher', 'Unknown Publisher')
        datas['show_description'] = show_data.get('description', '')
        datas['show_explicit'] = show_data.get('explicit', False)
        datas['show_total_episodes'] = show_data.get('total_episodes', 0)
        datas['show_media_type'] = show_data.get('media_type', 'unknown')  # e.g. 'audio'

        # For tagger compatibility, map some show data to common track/album fields
        datas['artist'] = datas['publisher']  # Publisher as artist for episodes
        datas['album'] = datas['show_name']  # Show name as album for episodes
        datas['genre'] = "; ".join(show_data.get('genres', []))  # If shows have genres
        datas['copyright'] = copyrights_data[0].get('text', '') if (copyrights_data := show_data.get('copyrights', [])) else ''

        # Placeholder for tags not directly from this API response but might be expected by tagger
        datas['tracknum'] = 1  # Default for single episode
        datas['discnum'] = 1  # Default for single episode
        datas['ar_album'] = datas['publisher']
        datas['label'] = datas['publisher']
        datas['bpm'] = 'Unknown'
        datas['gain'] = 'Unknown'
        datas['isrc'] = ''
        datas['upc'] = ''
        datas['lyric'] = ''
        datas['author'] = ''
        datas['composer'] = ''
        datas['lyricist'] = ''
        datas['version'] = ''

        datas['ids'] = ids  # The episode's own ID passed to the function

        logger.debug(f"Successfully tracked metadata for episode {ids}")

    except Exception as e:
        logger.error(f"Failed to track episode metadata for ID {ids}: {str(e)}")
        logger.debug(traceback.format_exc())
        return None

    return datas
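

# Illustrative sketch (not part of the original module): tracking_episode() maps show
# fields onto the usual track/album keys ('artist' = publisher, 'album' = show name),
# so episode metadata can flow through the same tagging path as music tracks. The
# helper name below is hypothetical and the episode ID is supplied by the caller.
def _example_episode_tag_summary(episode_id):
    tags = tracking_episode(episode_id)
    if not tags:
        return None
    return {
        'title': tags.get('music'),
        'album': tags.get('album'),            # show name
        'artist': tags.get('artist'),          # publisher
        'released': tags.get('release_date'),
    }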