diff --git a/.dockerignore b/.dockerignore index 0a31b95..f687cec 100755 --- a/.dockerignore +++ b/.dockerignore @@ -1,62 +1,36 @@ -# Git -.git -.gitignore -.gitattributes +# Allowlist minimal build context +* -# Docker -docker-compose.yaml -docker-compose.yml -Dockerfile -.dockerignore +# Backend +!requirements.txt +!app.py +!routes/** +# Re-ignore caches and compiled files inside routes +routes/**/__pycache__/ +routes/**/.pytest_cache/ +routes/**/*.pyc +routes/**/*.pyo -# Node -node_modules -spotizerr-ui/node_modules -npm-debug.log -pnpm-lock.yaml +# Frontend: only what's needed to build +!spotizerr-ui/package.json +!spotizerr-ui/pnpm-lock.yaml +!spotizerr-ui/pnpm-workspace.yaml +!spotizerr-ui/index.html +!spotizerr-ui/vite.config.ts +!spotizerr-ui/postcss.config.mjs +!spotizerr-ui/tsconfig.json +!spotizerr-ui/tsconfig.app.json +!spotizerr-ui/tsconfig.node.json +!spotizerr-ui/src/** +!spotizerr-ui/public/** +!spotizerr-ui/scripts/** +# Exclude heavy/unnecessary frontend folders +spotizerr-ui/node_modules/** +spotizerr-ui/dist/** +spotizerr-ui/dev-dist/** -# Python -__pycache__ -*.pyc -*.pyo -*.pyd -.Python -.env -.venv -venv/ -env/ -.env.example - -# Editor/OS -.vscode -.idea -.DS_Store -*.swp - -# Application data -credentials.json -test.py -downloads/ -creds/ -Test.py -prgs/ -flask_server.log -test.sh -routes/__pycache__/* -routes/utils/__pycache__/* -search_test.py -config/main.json -.cache -config/state/queue_state.json -output.log -queue_state.json -search_demo.py -celery_worker.log -static/js/* +# Always exclude local data/logs/tests/etc. +.data/ logs/ -data +Downloads/ tests/ - -# Non-essential files -docs/ -README.md diff --git a/.env.example b/.env.example index 7e3cd89..227381e 100644 --- a/.env.example +++ b/.env.example @@ -1,9 +1,9 @@ ### -### Main configuration file of the server. If you -### plan to have this only for personal use, you +### Main configuration file of the server. If you +### plan to have this only for personal use, you ### can leave the defaults as they are. ### -### If you plan on using for a server, +### If you plan on using for a server, ### see [insert docs url] ### @@ -19,13 +19,7 @@ REDIS_PASSWORD=CHANGE_ME # Set to true to filter out explicit content. EXPLICIT_FILTER=false - - -# User and group ID for the container. Sets the owner of the downloaded files. -PUID=1000 -PGID=1000 - -# Optional: Sets the default file permissions for newly created files within the container. +# Optional: Sets the default file permissions for newly created files within the container. UMASK=0022 # Whether to setup file permissions on startup. May improve performance on remote/slow filesystems @@ -51,7 +45,7 @@ DEFAULT_ADMIN_PASSWORD=admin123 # Whether to allow new users to register themselves or leave that only available for admins DISABLE_REGISTRATION=false -# SSO Configuration +# SSO Configuration SSO_ENABLED=true SSO_BASE_REDIRECT_URI=http://127.0.0.1:7171/api/auth/sso/callback FRONTEND_URL=http://127.0.0.1:7171 diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000..c85cdff --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,11 @@ +version: 2 +mkdocs: + configuration: mkdocs.yml +build: + os: ubuntu-22.04 + tools: + python: "3.11" + jobs: + post_install: + - pip install --upgrade pip + - pip install mkdocs mkdocs-material \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 739f4a8..b464440 100755 --- a/Dockerfile +++ b/Dockerfile @@ -7,40 +7,71 @@ RUN pnpm install --frozen-lockfile COPY spotizerr-ui/. . 
RUN pnpm build -# Stage 2: Final application image -FROM python:3.12-slim +# Stage 2: Python dependencies builder (create relocatable deps dir) +FROM python:3.11-slim AS py-deps +WORKDIR /app +COPY requirements.txt . +COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/ +RUN uv pip install --target /python -r requirements.txt -# Set an environment variable for non-interactive frontend installation -ENV DEBIAN_FRONTEND=noninteractive +# Stage 3: Fetch static ffmpeg/ffprobe binaries +FROM debian:stable-slim AS ffmpeg +ARG TARGETARCH +RUN apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates curl xz-utils jq \ + && rm -rf /var/lib/apt/lists/* +RUN set -euo pipefail; \ + case "$TARGETARCH" in \ + amd64) ARCH_SUFFIX=linux64 ;; \ + arm64) ARCH_SUFFIX=linuxarm64 ;; \ + *) echo "Unsupported arch: $TARGETARCH" && exit 1 ;; \ + esac; \ + ASSET_URL=$(curl -fsSL https://api.github.com/repos/BtbN/FFmpeg-Builds/releases/latest \ + | jq -r ".assets[] | select(.name | endswith(\"${ARCH_SUFFIX}-gpl.tar.xz\")) | .browser_download_url" \ + | head -n1); \ + if [ -z "$ASSET_URL" ]; then \ + echo "Failed to resolve FFmpeg asset for arch ${ARCH_SUFFIX}" && exit 1; \ + fi; \ + echo "Fetching FFmpeg from: $ASSET_URL"; \ + curl -fsSL -o /tmp/ffmpeg.tar.xz "$ASSET_URL"; \ + tar -xJf /tmp/ffmpeg.tar.xz -C /tmp; \ + mv /tmp/ffmpeg-* /ffmpeg + +# Stage 4: Prepare world-writable runtime directories +FROM busybox:1.36.1-musl AS runtime-dirs +RUN mkdir -p /artifact/downloads /artifact/data/config /artifact/data/creds /artifact/data/watch /artifact/data/history /artifact/logs/tasks \ + && touch /artifact/.cache \ + && chmod -R 0777 /artifact + +# Stage 5: Final application image (distroless) +FROM gcr.io/distroless/python3-debian12 LABEL org.opencontainers.image.source="https://github.com/Xoconoch/spotizerr" WORKDIR /app -# Install system dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - ffmpeg gosu\ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* +# Ensure Python finds vendored site-packages and unbuffered output +ENV PYTHONPATH=/python +ENV PYTHONUNBUFFERED=1 +ENV PYTHONUTF8=1 +ENV LANG=C.UTF-8 +ENV LC_ALL=C.UTF-8 -# Install Python dependencies -COPY requirements.txt . +# Copy application code +COPY --chown=65532:65532 . . -COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/ -RUN uv pip install --system -r requirements.txt +# Copy compiled assets from the frontend build +COPY --from=frontend-builder --chown=65532:65532 /app/spotizerr-ui/dist ./spotizerr-ui/dist -# Copy application code (excluding UI source and TS source) -COPY . . 
+# Copy vendored Python dependencies +COPY --from=py-deps --chown=65532:65532 /python /python -# Copy compiled assets from previous stages -COPY --from=frontend-builder /app/spotizerr-ui/dist ./spotizerr-ui/dist +# Copy static ffmpeg binaries +COPY --from=ffmpeg --chown=65532:65532 /ffmpeg/bin/ffmpeg /usr/local/bin/ffmpeg +COPY --from=ffmpeg --chown=65532:65532 /ffmpeg/bin/ffprobe /usr/local/bin/ffprobe -# Create necessary directories with proper permissions -RUN mkdir -p downloads data/config data/creds data/watch data/history logs/tasks && \ - chmod -R 777 downloads data logs +# Copy pre-created world-writable runtime directories +COPY --from=runtime-dirs --chown=65532:65532 /artifact/ ./ -# Make entrypoint script executable -RUN chmod +x entrypoint.sh - -# Set entrypoint to our script -ENTRYPOINT ["/app/entrypoint.sh"] +# No shell or package manager available in distroless +ENTRYPOINT ["python3", "app.py"] diff --git a/README.md b/README.md index 77f0279..5627571 100644 --- a/README.md +++ b/README.md @@ -27,157 +27,6 @@ If you self-host a music server with other users than yourself, you almost certa -## ✨ Key Features - -### 🎵 **Granular download support** -- **Individual Tracks** - Download any single track -- **Complete Albums** - Download entire albums with proper metadata -- **Full Playlists** - Download complete playlists (even massive ones with 1000+ tracks) -- **Artist Discographies** - Download an artist's complete catalog with filtering options -- **Spotify URL Support** - Paste any Spotify URL directly to queue downloads - -### 📱 **Modern Web Interface** -- **Progressive Web App (PWA)** - Install as a native client on mobile/desktop (installation process may vary depending on the browser/device) -- **Multiple Themes** - Light, dark, and system themes -- **Touch-friendly** - Swipe gestures and mobile-optimized controls - -### 🤖 **Intelligent Monitoring** -- **Playlist Watching** - Automatically download new tracks added to Spotify playlists -- **Artist Watching** - Monitor artists for new releases and download them automatically -- **Configurable Intervals** - Set how often to check for updates -- **Manual Triggers** - Force immediate checks when needed - -### ⚡ **Advanced Queue Management** -- **Concurrent Downloads** - Configure multiple simultaneous downloads -- **Real-time Updates** - Live progress updates via Server-Sent Events -- **Duplicate Prevention** - Automatically prevents duplicate downloads -- **Queue Persistence** - Downloads continue even after browser restart -- **Cancellation Support** - Cancel individual downloads or clear entire queue - -### 🔧 **Extensive Configuration** -- **Quality Control** - Configure audio quality per service (limitations per account tier apply) -- **Format Options** - Convert to MP3, FLAC, AAC, OGG, OPUS, WAV, ALAC in various bitrates -- **Custom Naming** - Flexible file and folder naming patterns -- **Content Filtering** - Hide explicit content if desired - -### 📊 **Comprehensive History** -- **Download Tracking** - Complete history of all downloads with metadata -- **Success Analytics** - Track success rates, failures, and skipped items -- **Search & Filter** - Find past downloads by title, artist, or status -- **Detailed Logs** - View individual track status for album/playlist downloads -- **Export Data** - Access complete metadata and external service IDs - -### 👥 **Multi-User Support** -- **User Authentication** - Secure login system with JWT tokens -- **SSO Integration** - Single Sign-On with Google and GitHub -- **Admin Panel** 
- User management and system configuration - -## 🚀 Quick Start - -### Prerequisites -- Docker and Docker Compose -- Spotify account(s) -- Deezer account(s) (optional, but recommended) -- Spotify API credentials (Client ID & Secret from [Spotify Developer Dashboard](https://developer.spotify.com/dashboard)) - -### Installation - -1. **Create project directory** - ```bash - mkdir spotizerr && cd spotizerr - ``` - -2. **Setup environment file** - ```bash - # Download .env.example from the repository and create .env - # Update all variables (e.g. Redis credentials, PUID/PGID, UMASK) - ``` - -3. **Copy docker-compose.yaml** - ```bash - # Download docker-compose.yaml from the repository - ``` - -4. **Start the application** - ```bash - docker compose up -d - ``` - -5. **Next steps** - - Before doing anything, it is recommended to go straight to [Configuration](#-configuration) - -## 🔧 Configuration - -### Service Accounts Setup - -1. **Spotify setup** - - Spotify is very restrictive, so use the [spotizerr-auth](https://github.com/Xoconoch/spotizerr-auth) tool on a computer with the spotify client installed to simplify this part of the setup. - -2. **Deezer setup (Optional but recommended for better stability, even if it's a free account)** - - Get your Deezer ARL token: - - **Chrome/Edge**: Open [Deezer](https://www.deezer.com/), press F12 → Application → Cookies → "https://www.deezer.com" → Copy "arl" value - - **Firefox**: Open [Deezer](https://www.deezer.com/), press F12 → Storage → Cookies → "https://www.deezer.com" → Copy "arl" value - - Add the ARL token in Settings → Accounts - -3. **Configure Download Settings** - - Set audio quality preferences - - Configure output format and naming - - Adjust concurrent download limits - -### Watch System Setup - -1. **Enable Monitoring** - - Go to Settings → Watch - - Enable the watch system - - Set check intervals - -2. **Add Items to Watch** - - Search for playlists or artists - - Click the "Watch" button - - New content will be automatically downloaded - -## 📋 Usage Examples - -### Download a Playlist -1. Search for the playlist or paste its Spotify URL -2. Click the download button -3. Monitor progress in the real-time queue - -### Monitor an Artist -1. Search for the artist -2. Click "Add to Watchlist" -3. Configure which release types to monitor (albums, singles, etc.) -4. New releases will be automatically downloaded - -### Bulk Download an Artist's Discography -1. Go to the artist page -2. Select release types (albums, singles, compilations) -3. Click "Download Discography" -4. All albums will be queued automatically - -## 🔍 Advanced Features - -### Custom File Naming -Configure how files and folders are named: -- `%artist%/%album%/%tracknum%. 
%title%` -- `%ar_album%/%album% (%year%)/%title%` -- Support for track numbers, artists, albums, years, and more - -### Quality Settings -- **Spotify**: OGG 96k, 160k, and 320k (320k requires Premium) -- **Deezer**: MP3 128k, MP3 320k (sometimes requires Premium), and FLAC (Premium only) -- **Conversion**: Convert to any supported format with custom bitrate - -### Fallback System -- Configure primary and fallback services -- Automatically switches if primary service fails -- Useful for geographic restrictions or account limits - -### Real-time Mode -- **Spotify only**: Matches track length with download time for optimal timing - -## 🆘 Support & Troubleshooting - ### Common Issues **Downloads not starting?** diff --git a/app.py b/app.py index 0036763..24ecd6a 100755 --- a/app.py +++ b/app.py @@ -41,7 +41,17 @@ except Exception as e: log_level_str = os.getenv("LOG_LEVEL", "WARNING").upper() log_level = LOG_LEVELS.get(log_level_str, logging.INFO) -# Import route routers (to be created) +# Apply process umask from environment as early as possible +_umask_value = os.getenv("UMASK") +if _umask_value: + try: + os.umask(int(_umask_value, 8)) + except Exception: + # Defer logging setup; avoid failing on invalid UMASK + pass + + +# Import and initialize routes (this will start the watch manager) from routes.auth.credentials import router as credentials_router from routes.auth.auth import router as auth_router from routes.content.album import router as album_router @@ -66,7 +76,6 @@ from routes.auth.middleware import AuthMiddleware # Import watch manager controls (start/stop) without triggering side effects from routes.utils.watch.manager import start_watch_manager, stop_watch_manager -# Import and initialize routes (this will start the watch manager) # Configure application-wide logging @@ -76,6 +85,17 @@ def setup_logging(): logs_dir = Path("logs") logs_dir.mkdir(exist_ok=True) + # Ensure required runtime directories exist + for p in [ + Path("downloads"), + Path("data/config"), + Path("data/creds"), + Path("data/watch"), + Path("data/history"), + Path("logs/tasks"), + ]: + p.mkdir(parents=True, exist_ok=True) + # Set up log file paths main_log = logs_dir / "spotizerr.log" @@ -131,6 +151,8 @@ def setup_logging(): def check_redis_connection(): """Check if Redis is available and accessible""" + from routes.utils.celery_config import REDIS_URL + if not REDIS_URL: logging.error("REDIS_URL is not configured. Please check your environment.") return False @@ -176,6 +198,20 @@ async def lifespan(app: FastAPI): # Startup setup_logging() + # Run migrations before initializing services + try: + from routes.migrations import run_migrations_if_needed + + run_migrations_if_needed() + logging.getLogger(__name__).info( + "Database migrations executed (if needed) early in startup." 
+ ) + except Exception as e: + logging.getLogger(__name__).error( + f"Database migration step failed early in startup: {e}", exc_info=True + ) + sys.exit(1) + # Check Redis connection if not check_redis_connection(): logging.error( @@ -185,6 +221,8 @@ async def lifespan(app: FastAPI): # Start Celery workers try: + from routes.utils.celery_manager import celery_manager + celery_manager.start() logging.info("Celery workers started successfully") except Exception as e: @@ -192,6 +230,8 @@ async def lifespan(app: FastAPI): # Start Watch Manager after Celery is up try: + from routes.utils.watch.manager import start_watch_manager, stop_watch_manager + start_watch_manager() logging.info("Watch Manager initialized and registered for shutdown.") except Exception as e: @@ -204,12 +244,16 @@ async def lifespan(app: FastAPI): # Shutdown try: + from routes.utils.watch.manager import start_watch_manager, stop_watch_manager + stop_watch_manager() logging.info("Watch Manager stopped") except Exception as e: logging.error(f"Error stopping Watch Manager: {e}") try: + from routes.utils.celery_manager import celery_manager + celery_manager.stop() logging.info("Celery workers stopped") except Exception as e: @@ -235,13 +279,30 @@ def create_app(): ) # Add authentication middleware (only if auth is enabled) - if AUTH_ENABLED: - app.add_middleware(AuthMiddleware) - logging.info("Authentication system enabled") - else: - logging.info("Authentication system disabled") + try: + from routes.auth import AUTH_ENABLED + from routes.auth.middleware import AuthMiddleware + + if AUTH_ENABLED: + app.add_middleware(AuthMiddleware) + logging.info("Authentication system enabled") + else: + logging.info("Authentication system disabled") + except Exception as e: + logging.warning(f"Auth system initialization failed or unavailable: {e}") # Register routers with URL prefixes + from routes.auth.auth import router as auth_router + from routes.system.config import router as config_router + from routes.core.search import router as search_router + from routes.auth.credentials import router as credentials_router + from routes.content.album import router as album_router + from routes.content.track import router as track_router + from routes.content.playlist import router as playlist_router + from routes.content.artist import router as artist_router + from routes.system.progress import router as prgs_router + from routes.core.history import router as history_router + app.include_router(auth_router, prefix="/api/auth", tags=["auth"]) # Include SSO router if available diff --git a/docker-compose.yaml b/docker-compose.yaml index d7bba01..951b1ee 100755 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,16 +1,24 @@ +# HEY, YOU! READ THE DOCS BEFORE YOU DO ANYTHING! 
+# https://spotizerr.rtfd.io + name: spotizerr services: spotizerr: image: cooldockerizer93/spotizerr + user: "1000:1000" # Spotizerr user:group ids volumes: - - ./data:/app/data - - ./downloads:/app/downloads - - ./logs:/app/logs + # Ensure these directories and the .cache file exist and are writable by the container user + - ./data:/app/data # data directory, contains config, creds, watch, history + - ./downloads:/app/downloads # downloads directory, contains downloaded files + - ./logs:/app/logs # logs directory, contains logs + - ./.cache:/app/.cache # cache file ports: + # Port to expose the app on - 7171:7171 container_name: spotizerr-app restart: unless-stopped env_file: + # Ensure you have a .env file in the root of the project, with the correct values - .env depends_on: - redis diff --git a/docs/API_DOCUMENTATION.md b/docs/api.md similarity index 93% rename from docs/API_DOCUMENTATION.md rename to docs/api.md index 5746202..42e3867 100644 --- a/docs/API_DOCUMENTATION.md +++ b/docs/api.md @@ -129,7 +129,7 @@ Get SSO configuration and available providers. #### `GET /auth/sso/login/google` Redirect to Google OAuth. -#### `GET /auth/sso/login/github` +#### `GET /auth/sso/login/github` Redirect to GitHub OAuth. #### `GET /auth/sso/callback/google` @@ -168,7 +168,7 @@ Get track metadata. ```json { "id": "string", - "name": "string", + "name": "string", "artists": [{"name": "string"}], "album": {"name": "string"}, "duration_ms": 180000, @@ -196,6 +196,8 @@ Download an entire album. Get album metadata. **Query Parameters:** - `id`: Spotify album ID +- `limit`: Tracks page size (optional) +- `offset`: Tracks page offset (optional) ### Playlist Downloads @@ -216,6 +218,7 @@ Download an entire playlist. Get playlist metadata. **Query Parameters:** - `id`: Spotify playlist ID +- `include_tracks`: true to include tracks (default: false) #### `GET /playlist/metadata` Get detailed playlist metadata including tracks. @@ -244,14 +247,12 @@ Download artist's discography. } ``` -#### `GET /artist/download/cancel` -**Query Parameters:** -- `task_id`: Task ID to cancel - #### `GET /artist/info` Get artist metadata. **Query Parameters:** - `id`: Spotify artist ID +- `limit`: Albums page size (default: 10, min: 1) +- `offset`: Albums page offset (default: 0, min: 0) ## 📺 Watch Functionality @@ -371,11 +372,11 @@ Search Spotify content. ### Task Monitoring #### `GET /prgs/list` -List all tasks with optional filtering. +List tasks with pagination. **Query Parameters:** -- `status`: Filter by status (`pending`, `running`, `completed`, `failed`) -- `download_type`: Filter by type (`track`, `album`, `playlist`) -- `limit`: Results limit +- `page`: Page number (default: 1) +- `limit`: Items per page (default: 50, max: 100) +- `active_only`: If true, only return active tasks #### `GET /prgs/{task_id}` Get specific task details and progress. @@ -383,7 +384,10 @@ Get specific task details and progress. #### `GET /prgs/updates` Get task updates since last check. **Query Parameters:** -- `since`: Timestamp to get updates since +- `since`: Unix timestamp (required for delta updates). If omitted, returns a paginated snapshot. +- `page`: Page number for non-active tasks (default: 1) +- `limit`: Items per page for non-active tasks (default: 20, max: 100) +- `active_only`: If true, only return active tasks #### `GET /prgs/stream` **Server-Sent Events (SSE)** endpoint for real-time progress updates. @@ -448,13 +452,13 @@ Get download statistics. #### `GET /history/search` Search download history. 
**Query Parameters:** -- `q`: Search query -- `field`: Field to search (`name`, `artist`, `url`) +- `q`: Search query (required) +- `limit`: Max results (default: 50, max: 200) #### `GET /history/recent` Get recent downloads. **Query Parameters:** -- `hours`: Hours to look back (default: 24) +- `limit`: Max results (default: 20, max: 100) #### `GET /history/failed` Get failed downloads. @@ -464,8 +468,7 @@ Clean up old history entries. **Request:** ```json { - "older_than_days": 30, - "keep_failed": true + "days_old": 30 } ``` @@ -641,4 +644,4 @@ curl -X PUT "http://localhost:7171/api/playlist/watch/37i9dQZF1DXcBWIGoYBM5M" \ --- -*This documentation covers all endpoints discovered in the Spotizerr routes directory. The API is designed for high-throughput music downloading with comprehensive monitoring and management capabilities.* \ No newline at end of file +*This documentation covers all endpoints discovered in the Spotizerr routes directory. The API is designed for high-throughput music downloading with comprehensive monitoring and management capabilities.* \ No newline at end of file diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..90ac35f --- /dev/null +++ b/docs/index.md @@ -0,0 +1,16 @@ +## Spotizerr Documentation + +Start with Getting started, then explore the sections below. + +- [Getting started](user/getting-started.md) +- [Configuration](user/configuration.md) +- [Environment](user/environment.md) +- [Tracks](user/tracks.md) +- [Albums](user/albums.md) +- [Playlists](user/playlists.md) +- [Artists](user/artists.md) +- [Watchlist](user/watchlist.md) +- [History](user/history.md) +- [Multi-user](user/multi-user.md) + +For API details, see [API](api.md). \ No newline at end of file diff --git a/docs/user/albums.md b/docs/user/albums.md new file mode 100644 index 0000000..2f3c426 --- /dev/null +++ b/docs/user/albums.md @@ -0,0 +1,13 @@ +## Albums + +- Open from Search or an Artist page. +- Actions: + - Download full album or any track + - Browse tracklist (order, artists, duration) + - Large albums: tracks load in pages as you scroll +- Explicit filter hides explicit tracks when enabled in Config + +Endpoints: +- GET `/api/album/info?id=...&limit=50&offset=...` — album metadata + paged tracks +- GET `/api/album/download/{album_id}` — queue album download +- GET `/api/prgs/stream` — live progress via SSE \ No newline at end of file diff --git a/docs/user/artists.md b/docs/user/artists.md new file mode 100644 index 0000000..28bda22 --- /dev/null +++ b/docs/user/artists.md @@ -0,0 +1,26 @@ +## Artists + +- Open from Search. +- Discography sections: Albums, Singles, Compilations, Appears On (infinite scroll) +- Download: + - Download all (queues albums by selected types) + - Download any album individually +- Watch: + - Add/remove artist to Watchlist + - Configure release types and intervals in Configuration → Watch + +How to monitor an artist: +1. Search the artist and open their page +2. Click Watch +3. Configure in Configuration → Watch + +How to download discography: +1. Open the artist page +2. Select release types (Albums, Singles, Compilations) +3. 
Click Download All; track in Queue and History + +Endpoints: +- GET `/api/artist/info?id=...&limit=10&offset=...` — metadata + paged albums +- GET `/api/artist/download/{artist_id}?album_type=album,single,compilation` — queue discography +- PUT `/api/artist/watch/{artist_id}` / DELETE `/api/artist/watch/{artist_id}` +- GET `/api/artist/watch/{artist_id}/status` \ No newline at end of file diff --git a/docs/user/configuration.md b/docs/user/configuration.md new file mode 100644 index 0000000..4d71ed5 --- /dev/null +++ b/docs/user/configuration.md @@ -0,0 +1,44 @@ +## Configuration + +See also: [Environment variables](environment.md) + +Open Configuration in the web UI. Tabs: + +- General (admin) + - App version, basic info +- Downloads (admin) + - Concurrent downloads, retry behavior + - Quality/format defaults and conversion + - Real-time mode: aligns download time with track length +- Formatting (admin) + - File/folder naming patterns (examples) + - `%artist%/%album%/%tracknum%. %title%` + - `%ar_album%/%album% (%year%)/%title%` +- Accounts (admin) + - Spotify: use `spotizerr-auth` to add credentials + - Deezer ARL (optional): + - Chrome/Edge: DevTools → Application → Cookies → https://www.deezer.com → copy `arl` + - Firefox: DevTools → Storage → Cookies → https://www.deezer.com → copy `arl` + - Paste ARL in Accounts + - Select main account when multiple exist +- Watch (admin) + - Enable/disable watch system + - Set check intervals + - Manually trigger checks (artists/playlists) +- Server (admin) + - System info and advanced settings +- Profile (all users when auth is enabled) + - Change password, view role and email + +Quality formats (reference): +- Spotify: OGG 96k/160k/320k (320k requires Premium) +- Deezer: MP3 128k/320k (320k may require Premium), FLAC (Premium) +- Conversion: MP3/FLAC/AAC/OGG/OPUS/WAV/ALAC with custom bitrate + +Fallback system: +- Configure primary and fallback services +- Automatically switches if primary fails (useful for geo/account limits) + +Notes: +- Explicit content filter applies in pages (e.g., hides explicit tracks on album/playlist views) +- Watch system must be enabled before adding items \ No newline at end of file diff --git a/docs/user/environment.md b/docs/user/environment.md new file mode 100644 index 0000000..7d0df4c --- /dev/null +++ b/docs/user/environment.md @@ -0,0 +1,36 @@ +## Environment variables + +Location: project `.env`. Minimal reference for server admins. + +### Core +- HOST: Interface to bind (default `0.0.0.0`) +- EXPLICIT_FILTER: Filter explicit content (`true|false`, default `false`) + +### Redis +- REDIS_HOST: Hostname (default `redis`) +- REDIS_PORT: Port (default `6379`) +- REDIS_DB: Database index (default `0`) +- REDIS_PASSWORD: Password + +### File ownership & permissions +- UMASK: Default permissions for new files (default `0022`) +- SKIP_SET_PERMISSIONS: Skip permission fix on startup (`true|false`, default `false`) + +### Multi-user & auth +- ENABLE_AUTH: Enable authentication (`true|false`, default `false`) +- JWT_SECRET: Long random string for tokens (required if auth enabled) +- JWT_EXPIRATION_HOURS: Session duration in hours (default `720`) +- DEFAULT_ADMIN_USERNAME: Seed admin username (default `admin`) +- DEFAULT_ADMIN_PASSWORD: Seed admin password (change it!) +- DISABLE_REGISTRATION: Disable public signups (`true|false`, default `false`) + +### SSO +- SSO_ENABLED: Enable SSO (`true|false`) +- SSO_BASE_REDIRECT_URI: Base backend callback (e.g. 
`http://127.0.0.1:7171/api/auth/sso/callback`)
+- FRONTEND_URL: Public UI base (e.g. `http://127.0.0.1:7171`)
+- GOOGLE_CLIENT_ID / GOOGLE_CLIENT_SECRET
+- GITHUB_CLIENT_ID / GITHUB_CLIENT_SECRET
+
+### Tips
+- If running behind a reverse proxy, set `FRONTEND_URL` and `SSO_BASE_REDIRECT_URI` to public URLs.
+- Change `DEFAULT_ADMIN_*` on first login or disable registration and create users from the admin panel.
\ No newline at end of file
diff --git a/docs/user/getting-started.md b/docs/user/getting-started.md
new file mode 100644
index 0000000..c357eb8
--- /dev/null
+++ b/docs/user/getting-started.md
@@ -0,0 +1,90 @@
+## Getting started
+
+### Prerequisites
+- Docker and Docker Compose
+- Spotify account(s)
+- Deezer account (optional, recommended for FLAC)
+- Spotify API `client_id` and `client_secret` (from Spotify Developer Dashboard)
+
+Quick start (Docker Compose):
+
+```bash
+mkdir spotizerr && cd spotizerr
+mkdir -p data logs downloads && touch .cache
+wget https://raw.githubusercontent.com/spotizerr-dev/spotizerr/main/docker-compose.yaml
+
+# Before running the next command, review the docker-compose file; it is well documented.
+docker compose up -d
+```
+
+### Initial setup
+- Open the web UI (default: `http://localhost:7171`)
+- Go to Configuration → Accounts
+- Use `spotizerr-auth` to register Spotify credentials quickly
+
+Spotify account setup with spotizerr-auth:
+
+```bash
+docker run --network=host --rm -it cooldockerizer93/spotizerr-auth
+```
+Or, if running it via Docker doesn't work:
+
+#### Alternative installers
+
+
+**Linux / macOS**
+
+```bash
+python3 -m venv .venv && source .venv/bin/activate && pip install spotizerr-auth
+```
+
+
+
+
+**Windows (PowerShell)**
+
+```powershell
+python -m venv .venv; .venv\Scripts\Activate.ps1; pip install spotizerr-auth
+```
+
+
+
+
+**Windows (cmd.exe)**
+
+```cmd
+python -m venv .venv && .venv\Scripts\activate && pip install spotizerr-auth
+```
+
+
+
+Then run `spotizerr-auth`.
+
+_Note: You will have to activate the virtual environment every time you want to register a new account._
+
+### Registering an account
+- Ensure the Spotify client is open before starting
+- Enter the Spotizerr URL (e.g., http://localhost:7171)
+- Enter Spotify API `client_id` and `client_secret` if prompted (one-time)
+- Name the account + region code (e.g., US)
+- Transfer playback to the temporary device when asked
+- Credentials are posted to Spotizerr automatically
+
+### Next steps
+
+- Add a Deezer ARL in Configuration → Accounts (optional; enables FLAC with a Premium account)
+- Adjust Download and Formatting options
+- Enable the Watch system if you want automatic downloads
+
+### Troubleshooting (quick)
+
+- Downloads not starting: verify service credentials and API keys
+- Watch not working: enable in Configuration → Watch and set intervals
+- Auth issues: ensure the JWT secret and SSO credentials (if used) are set; try clearing the browser cache
+- Queue stalling: force-refresh the page (Ctrl+F5)
+
+### Logs
+```bash
+docker logs spotizerr
+```
+- Enable the Watch system if you want auto-downloads
\ No newline at end of file
diff --git a/docs/user/history.md b/docs/user/history.md
new file mode 100644
index 0000000..74d77b5
--- /dev/null
+++ b/docs/user/history.md
@@ -0,0 +1,17 @@
+## History
+
+See all downloads and their outcomes.
+ +- Filters + - By type (track/album/playlist) and status (completed/failed/skipped/in_progress) + - Pagination for large histories +- Drill-down + - Open an entry to view child tracks for albums/playlists + - Re-queue failures from the UI + +Backend endpoints used: + +- GET `/api/history?download_type=&status=&limit=&offset=` +- GET `/api/history/{task_id}` (entry) +- GET `/api/history/{task_id}/children` (child tracks) +- GET `/api/history/stats`, `/api/history/recent`, `/api/history/failed` (summaries) \ No newline at end of file diff --git a/docs/user/multi-user.md b/docs/user/multi-user.md new file mode 100644 index 0000000..823ce14 --- /dev/null +++ b/docs/user/multi-user.md @@ -0,0 +1,28 @@ +## Multi-user + +Authentication is optional. When enabled: + +Login/Register + + - Local accounts with username/password + - First registered user becomes admin + - Public registration can be disabled + +SSO (optional) + + - Google and GitHub when configured + +Roles + + - User: can search/download, manage their profile + - Admin: access to all Configuration tabs and user management + +Admin actions + + - Create/delete users, change roles + - Reset user passwords + +Where to find it in the UI: + +- User menu (top-right) → Profile settings +- Configuration → User Management (admin) \ No newline at end of file diff --git a/docs/user/playlists.md b/docs/user/playlists.md new file mode 100644 index 0000000..8785172 --- /dev/null +++ b/docs/user/playlists.md @@ -0,0 +1,28 @@ +## Playlists + +Open a playlist from search. + +- Download + - Download entire playlist + - Download individual tracks +- Metadata and tracks + - Loads metadata first (fast, avoids rate limits) + - Tracks load in pages as you scroll +- Watch + - Add/remove playlist to Watchlist (auto-download new additions when enabled) + +How-to: download a playlist + + 1. Search for the playlist or paste its Spotify URL + 2. Click Download + 3. Monitor progress in the Queue; results appear in History + +Backend endpoints used: + +- GET `/api/playlist/metadata?id=...` (metadata only) +- GET `/api/playlist/tracks?id=...&limit=50&offset=...` (paged tracks) +- GET `/api/playlist/info?id=...&include_tracks=true` (full info when needed) +- GET `/api/playlist/download/{playlist_id}` (queue download) +- PUT `/api/playlist/watch/{playlist_id}` (watch) +- DELETE `/api/playlist/watch/{playlist_id}` (unwatch) +- GET `/api/playlist/watch/{playlist_id}/status` (status) \ No newline at end of file diff --git a/docs/user/tracks.md b/docs/user/tracks.md new file mode 100644 index 0000000..564acb3 --- /dev/null +++ b/docs/user/tracks.md @@ -0,0 +1,17 @@ +## Tracks + +Find a track via search or open a track page. + +- Download + - Click Download on result card or track page + - Progress visible in the Queue drawer +- Open on Spotify + - From track page, open the Spotify link +- Details shown + - Artists, album, duration, popularity + +Backend endpoints used: + +- GET `/api/track/info?id=...` (metadata) +- GET `/api/track/download/{track_id}` (queue download) +- GET `/api/progress/stream` (live queue updates) \ No newline at end of file diff --git a/docs/user/watchlist.md b/docs/user/watchlist.md new file mode 100644 index 0000000..cb1c5b7 --- /dev/null +++ b/docs/user/watchlist.md @@ -0,0 +1,18 @@ +## Watchlist + +Enable the watch system in Configuration → Watch first. 
+ +- Add items + - From Artist or Playlist pages, click Watch +- What it does + - Periodically checks watched items + - Queues new releases (artists) and/or newly added tracks (playlists) +- Setup + - Enable watch system and set intervals in Configuration → Watch + - Trigger a manual check if you want immediate processing + +Backend endpoints used: + +- Artists: PUT/DELETE/GET status under `/api/artist/watch/*` +- Playlists: PUT/DELETE/GET status under `/api/playlist/watch/*` +- Manual triggers: POST `/api/artist/watch/trigger_check` and `/api/playlist/watch/trigger_check` \ No newline at end of file diff --git a/entrypoint.sh b/entrypoint.sh deleted file mode 100755 index cf00504..0000000 --- a/entrypoint.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash -set -e - -# Set umask if UMASK variable is provided -if [ -n "${UMASK}" ]; then - umask "${UMASK}" -fi - -# Compose Redis URLs from base variables if not explicitly provided -if [ -z "${REDIS_URL}" ]; then - REDIS_HOST=${REDIS_HOST:-redis} - REDIS_PORT=${REDIS_PORT:-6379} - REDIS_DB=${REDIS_DB:-0} - - if [ -n "${REDIS_PASSWORD}" ]; then - if [ -n "${REDIS_USERNAME}" ]; then - AUTH_PART="${REDIS_USERNAME}:${REDIS_PASSWORD}@" - else - AUTH_PART=":${REDIS_PASSWORD}@" - fi - else - AUTH_PART="" - fi - export REDIS_URL="redis://${AUTH_PART}${REDIS_HOST}:${REDIS_PORT}/${REDIS_DB}" -fi - -if [ -z "${REDIS_BACKEND}" ]; then - export REDIS_BACKEND="${REDIS_URL}" -fi - -# Redis is now in a separate container so we don't need to start it locally -echo "Using Redis at ${REDIS_URL}" - -# Check if both PUID and PGID are not set -if [ -z "${PUID}" ] && [ -z "${PGID}" ]; then - # Run as root directly - echo "Running as root user (no PUID/PGID specified)" - exec python app.py -else - # Verify both PUID and PGID are set - if [ -z "${PUID}" ] || [ -z "${PGID}" ]; then - echo "ERROR: Must supply both PUID and PGID or neither" - exit 1 - fi - - # Check for root user request - if [ "${PUID}" -eq 0 ] && [ "${PGID}" -eq 0 ]; then - echo "Running as root user (PUID/PGID=0)" - exec python app.py - else - # Check if the group with the specified GID already exists - if getent group "${PGID}" >/dev/null; then - # If the group exists, use its name instead of creating a new one - GROUP_NAME=$(getent group "${PGID}" | cut -d: -f1) - echo "Using existing group: ${GROUP_NAME} (GID: ${PGID})" - else - # If the group doesn't exist, create it - GROUP_NAME="appgroup" - groupadd -g "${PGID}" "${GROUP_NAME}" - echo "Created group: ${GROUP_NAME} (GID: ${PGID})" - fi - - # Check if the user with the specified UID already exists - if getent passwd "${PUID}" >/dev/null; then - # If the user exists, use its name instead of creating a new one - USER_NAME=$(getent passwd "${PUID}" | cut -d: -f1) - echo "Using existing user: ${USER_NAME} (UID: ${PUID})" - else - # If the user doesn't exist, create it - USER_NAME="appuser" - useradd -u "${PUID}" -g "${GROUP_NAME}" -d /app "${USER_NAME}" - echo "Created user: ${USER_NAME} (UID: ${PUID})" - fi - - # Ensure proper permissions for all app directories unless skipped via env var - if [ "${SKIP_SET_PERMISSIONS}" = "true" ] || [ "${SKIP_SET_PERMISSIONS}" = "1" ]; then - echo "SKIP_SET_PERMISSIONS is set; skipping permissions for /app/downloads /app/data /app/logs" - else - echo "Setting permissions for /app directories..." 
-            chown -R "${USER_NAME}:${GROUP_NAME}" /app/downloads /app/data /app/logs || true
-        fi
-
-        # Ensure Spotipy cache file exists and is writable (fast, local to container)
-        touch /app/.cache || true
-        chown "${USER_NAME}:${GROUP_NAME}" /app/.cache || true
-
-        # Run as specified user
-        echo "Starting application as ${USER_NAME}..."
-        exec gosu "${USER_NAME}" python app.py
-    fi
-fi
\ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000..1b517da
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,30 @@
+site_name: Spotizerr Documentation
+site_description: Straight-to-the-point docs for Spotizerr
+site_dir: site
+use_directory_urls: true
+theme:
+  name: material
+  features:
+    - navigation.instant
+    - navigation.tracking
+    - content.action.edit
+    - search.suggest
+    - search.highlight
+nav:
+  - Getting started: user/getting-started.md
+  - Configuration: user/configuration.md
+  - Environment: user/environment.md
+  - Tracks: user/tracks.md
+  - Albums: user/albums.md
+  - Playlists: user/playlists.md
+  - Artists: user/artists.md
+  - Watchlist: user/watchlist.md
+  - History: user/history.md
+  - Multi-user: user/multi-user.md
+  - API: api.md
+markdown_extensions:
+  - toc:
+      permalink: true
+  - admonition
+  - tables
+  - fenced_code
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index eb1861e..2ea3f67 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,9 +1,11 @@
 fastapi==0.116.1
 uvicorn[standard]==0.35.0
 celery==5.5.3
-deezspot-spotizerr==2.7.3
+deezspot-spotizerr==2.7.6
 httpx==0.28.1
 bcrypt==4.2.1
 PyJWT==2.10.1
 python-multipart==0.0.17
 fastapi-sso==0.18.0
+redis==5.0.7
+async-timeout==4.0.3
diff --git a/routes/auth/credentials.py b/routes/auth/credentials.py
index 8b8496f..6427933 100755
--- a/routes/auth/credentials.py
+++ b/routes/auth/credentials.py
@@ -23,6 +23,22 @@ router = APIRouter()
 init_credentials_db()
 
 
+def _set_active_account_if_empty(service: str, name: str):
+    """
+    Sets the newly created account as the active account in the main config
+    if no active account is currently set for the given service.
+ """ + try: + from routes.utils.celery_config import get_config_params as get_main_config_params + from routes.system.config import save_config + config = get_main_config_params() + if not config.get(service): + config[service] = name + save_config(config) + except Exception as e: + logger.warning(f"Could not set new {service.capitalize()} account '{name}' as active: {e}") + + @router.get("/spotify_api_config") @router.put("/spotify_api_config") async def handle_spotify_api_config(request: Request, current_user: User = Depends(require_admin_from_state)): @@ -130,18 +146,7 @@ async def handle_create_credential(service: str, name: str, request: Request, cu # Validation is handled within create_credential utility function result = create_credential(service, name, data) - # set as active Spotify account if none is set - if service == "spotify": - try: - from routes.utils.celery_config import get_config_params as get_main_config_params - from routes.system.config import save_config - config = get_main_config_params() - # The field is likely "spotify" (as used in frontend) - if not config.get("spotify"): - config["spotify"] = name - save_config(config) - except Exception as e: - logger.warning(f"Could not set new Spotify account '{name}' as active: {e}") + _set_active_account_if_empty(service, name) return { "message": f"Credential for '{name}' ({service}) created successfully.", diff --git a/routes/migrations/runner.py b/routes/migrations/runner.py index 820ac28..4981ac9 100644 --- a/routes/migrations/runner.py +++ b/routes/migrations/runner.py @@ -4,6 +4,7 @@ from pathlib import Path from typing import Optional from .v3_2_0 import MigrationV3_2_0 +from .v3_2_1 import log_noop_migration_detected logger = logging.getLogger(__name__) @@ -285,7 +286,6 @@ def _update_watch_playlists_db(conn: sqlite3.Connection) -> None: EXPECTED_PLAYLIST_TRACKS_COLUMNS, f"playlist tracks ({table_name})", ) - logger.info("Upgraded watch playlists DB to 3.2.0 base schema") except Exception: logger.error( "Failed to upgrade watch playlists DB to 3.2.0 base schema", exc_info=True @@ -348,7 +348,6 @@ def _update_watch_artists_db(conn: sqlite3.Connection) -> None: EXPECTED_ARTIST_ALBUMS_COLUMNS, f"artist albums ({table_name})", ) - logger.info("Upgraded watch artists DB to 3.2.0 base schema") except Exception: logger.error( "Failed to upgrade watch artists DB to 3.2.0 base schema", exc_info=True @@ -379,10 +378,10 @@ def run_migrations_if_needed(): with _safe_connect(HISTORY_DB) as history_conn: if history_conn and not _is_history_at_least_3_2_0(history_conn): logger.error( - "Instance is not at schema version 3.2.0. Please upgrade to 3.2.0 before applying 3.2.1." + "Instance is not at schema version 3.2.0. Please upgrade to 3.2.0 before applying 3.3.0." ) raise RuntimeError( - "Instance is not at schema version 3.2.0. Please upgrade to 3.2.0 before applying 3.2.1." + "Instance is not at schema version 3.2.0. Please upgrade to 3.2.0 before applying 3.3.0." 
) # Watch playlists DB @@ -413,4 +412,5 @@ def run_migrations_if_needed(): raise else: _ensure_creds_filesystem() - logger.info("Database migrations check completed (3.2.0 -> 3.2.1 path)") + log_noop_migration_detected() + logger.info("Database migrations check completed (3.2.0 -> 3.3.0 path)") diff --git a/routes/migrations/v3_2_0.py b/routes/migrations/v3_2_0.py index 4307bec..3849210 100644 --- a/routes/migrations/v3_2_0.py +++ b/routes/migrations/v3_2_0.py @@ -6,7 +6,7 @@ logger = logging.getLogger(__name__) class MigrationV3_2_0: """ - Migration for version 3.2.0 (upgrade path 3.2.0 -> 3.2.1). + Migration for version 3.2.0 (upgrade path 3.2.0 -> 3.3.0). - Adds per-item batch progress columns to Watch DBs to support page-by-interval processing. - Enforces prerequisite: previous instance version must be 3.1.2 (validated by runner). """ @@ -21,7 +21,7 @@ class MigrationV3_2_0: "batch_next_offset": "INTEGER DEFAULT 0", } - # --- No-op for history/accounts in 3.2.1 --- + # --- No-op for history/accounts in 3.3.0 --- def check_history(self, conn: sqlite3.Connection) -> bool: return True @@ -59,14 +59,14 @@ class MigrationV3_2_0: f"ALTER TABLE watched_playlists ADD COLUMN {col_name} {col_type}" ) logger.info( - f"Added column '{col_name} {col_type}' to watched_playlists for 3.2.1 batch progress." + f"Added column '{col_name} {col_type}' to watched_playlists for 3.3.0 batch progress." ) except sqlite3.OperationalError as e: logger.warning( f"Could not add column '{col_name}' to watched_playlists: {e}" ) except Exception: - logger.error("Failed to update watched_playlists for 3.2.1", exc_info=True) + logger.error("Failed to update watched_playlists for 3.3.0", exc_info=True) # --- Watch: artists --- @@ -90,11 +90,11 @@ class MigrationV3_2_0: f"ALTER TABLE watched_artists ADD COLUMN {col_name} {col_type}" ) logger.info( - f"Added column '{col_name} {col_type}' to watched_artists for 3.2.1 batch progress." + f"Added column '{col_name} {col_type}' to watched_artists for 3.3.0 batch progress." ) except sqlite3.OperationalError as e: logger.warning( f"Could not add column '{col_name}' to watched_artists: {e}" ) except Exception: - logger.error("Failed to update watched_artists for 3.2.1", exc_info=True) + logger.error("Failed to update watched_artists for 3.3.0", exc_info=True) diff --git a/routes/migrations/v3_2_1.py b/routes/migrations/v3_2_1.py new file mode 100644 index 0000000..d8cad20 --- /dev/null +++ b/routes/migrations/v3_2_1.py @@ -0,0 +1,41 @@ +import logging +import sqlite3 + +logger = logging.getLogger(__name__) + + +class MigrationV3_2_1: + """ + No-op migration for version 3.2.1 (upgrade path 3.2.1 -> 3.3.0). + No database schema changes are required. + """ + + def check_history(self, conn: sqlite3.Connection) -> bool: + return True + + def update_history(self, conn: sqlite3.Connection) -> None: + pass + + def check_accounts(self, conn: sqlite3.Connection) -> bool: + return True + + def update_accounts(self, conn: sqlite3.Connection) -> None: + pass + + def check_watch_playlists(self, conn: sqlite3.Connection) -> bool: + return True + + def update_watch_playlists(self, conn: sqlite3.Connection) -> None: + pass + + def check_watch_artists(self, conn: sqlite3.Connection) -> bool: + return True + + def update_watch_artists(self, conn: sqlite3.Connection) -> None: + pass + + +def log_noop_migration_detected() -> None: + logger.info( + "No migration performed: detected schema for 3.2.1; no changes needed for 3.2.1 -> 3.3.0." 
+ ) diff --git a/routes/system/progress.py b/routes/system/progress.py index 224c70e..a2b1a66 100755 --- a/routes/system/progress.py +++ b/routes/system/progress.py @@ -1,10 +1,14 @@ from fastapi import APIRouter, HTTPException, Request, Depends -from fastapi.responses import JSONResponse, StreamingResponse +from fastapi.responses import StreamingResponse import logging import time import json import asyncio -from typing import Dict, Set +from typing import Set + +import redis +import threading +from routes.utils.celery_config import REDIS_URL from routes.utils.celery_tasks import ( get_task_info, @@ -12,22 +16,28 @@ from routes.utils.celery_tasks import ( get_last_task_status, get_all_tasks, cancel_task, + delete_task_data_and_log, ProgressState, ) # Import authentication dependencies -from routes.auth.middleware import require_auth_from_state, get_current_user_from_state, User +from routes.auth.middleware import ( + require_auth_from_state, + get_current_user_from_state, + User, +) # Configure logging logger = logging.getLogger(__name__) router = APIRouter() + # Global SSE Event Broadcaster class SSEBroadcaster: def __init__(self): self.clients: Set[asyncio.Queue] = set() - + async def add_client(self, queue: asyncio.Queue): """Add a new SSE client""" self.clients.add(queue) @@ -40,19 +50,23 @@ class SSEBroadcaster: async def broadcast_event(self, event_data: dict): """Broadcast an event to all connected clients""" - logger.debug(f"SSE Broadcaster: Attempting to broadcast to {len(self.clients)} clients") - + logger.debug( + f"SSE Broadcaster: Attempting to broadcast to {len(self.clients)} clients" + ) + if not self.clients: logger.debug("SSE Broadcaster: No clients connected, skipping broadcast") return - + # Add global task counts right before broadcasting - this is the single source of truth enhanced_event_data = add_global_task_counts_to_event(event_data.copy()) event_json = json.dumps(enhanced_event_data) sse_data = f"data: {event_json}\n\n" - - logger.debug(f"SSE Broadcaster: Broadcasting event: {enhanced_event_data.get('change_type', 'unknown')} with {enhanced_event_data.get('active_tasks', 0)} active tasks") - + + logger.debug( + f"SSE Broadcaster: Broadcasting event: {enhanced_event_data.get('change_type', 'unknown')} with {enhanced_event_data.get('active_tasks', 0)} active tasks" + ) + # Send to all clients, remove disconnected ones disconnected = set() sent_count = 0 @@ -60,64 +74,81 @@ class SSEBroadcaster: try: await client_queue.put(sse_data) sent_count += 1 - logger.debug(f"SSE: Successfully sent to client queue") + logger.debug("SSE: Successfully sent to client queue") except Exception as e: logger.error(f"SSE: Failed to send to client: {e}") disconnected.add(client_queue) - + # Clean up disconnected clients for client in disconnected: self.clients.discard(client) - - logger.debug(f"SSE Broadcaster: Successfully sent to {sent_count} clients, removed {len(disconnected)} disconnected clients") + logger.info( + f"SSE Broadcaster: Successfully sent to {sent_count} clients, removed {len(disconnected)} disconnected clients" + ) + # Global broadcaster instance sse_broadcaster = SSEBroadcaster() # Redis subscriber for cross-process SSE events -import redis -import threading -from routes.utils.celery_config import REDIS_URL # Redis client for SSE pub/sub sse_redis_client = redis.Redis.from_url(REDIS_URL) + def start_sse_redis_subscriber(): """Start Redis subscriber to listen for SSE events from Celery workers""" + def redis_subscriber_thread(): try: pubsub = 
sse_redis_client.pubsub() pubsub.subscribe("sse_events") logger.info("SSE Redis Subscriber: Started listening for events") - + for message in pubsub.listen(): - if message['type'] == 'message': + if message["type"] == "message": try: - event_data = json.loads(message['data'].decode('utf-8')) - event_type = event_data.get('event_type', 'unknown') - task_id = event_data.get('task_id', 'unknown') - - logger.debug(f"SSE Redis Subscriber: Received {event_type} for task {task_id}") - + event_data = json.loads(message["data"].decode("utf-8")) + event_type = event_data.get("event_type", "unknown") + task_id = event_data.get("task_id", "unknown") + + logger.debug( + f"SSE Redis Subscriber: Received {event_type} for task {task_id}" + ) + # Handle different event types - if event_type == 'progress_update': + if event_type == "progress_update": # Transform callback data into task format expected by frontend loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) try: - broadcast_data = loop.run_until_complete(transform_callback_to_task_format(task_id, event_data)) + broadcast_data = loop.run_until_complete( + transform_callback_to_task_format( + task_id, event_data + ) + ) if broadcast_data: - loop.run_until_complete(sse_broadcaster.broadcast_event(broadcast_data)) - logger.debug(f"SSE Redis Subscriber: Broadcasted callback to {len(sse_broadcaster.clients)} clients") + loop.run_until_complete( + sse_broadcaster.broadcast_event(broadcast_data) + ) + logger.debug( + f"SSE Redis Subscriber: Broadcasted callback to {len(sse_broadcaster.clients)} clients" + ) finally: loop.close() - elif event_type == 'summary_update': + elif event_type == "summary_update": # Task summary update - use existing trigger_sse_update logic loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) try: - loop.run_until_complete(trigger_sse_update(task_id, event_data.get('reason', 'update'))) - logger.debug(f"SSE Redis Subscriber: Processed summary update for {task_id}") + loop.run_until_complete( + trigger_sse_update( + task_id, event_data.get("reason", "update") + ) + ) + logger.debug( + f"SSE Redis Subscriber: Processed summary update for {task_id}" + ) finally: loop.close() else: @@ -125,50 +156,58 @@ def start_sse_redis_subscriber(): loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) try: - loop.run_until_complete(sse_broadcaster.broadcast_event(event_data)) - logger.debug(f"SSE Redis Subscriber: Broadcasted {event_type} to {len(sse_broadcaster.clients)} clients") + loop.run_until_complete( + sse_broadcaster.broadcast_event(event_data) + ) + logger.debug( + f"SSE Redis Subscriber: Broadcasted {event_type} to {len(sse_broadcaster.clients)} clients" + ) finally: loop.close() - + except Exception as e: - logger.error(f"SSE Redis Subscriber: Error processing message: {e}", exc_info=True) - + logger.error( + f"SSE Redis Subscriber: Error processing message: {e}", + exc_info=True, + ) + except Exception as e: logger.error(f"SSE Redis Subscriber: Fatal error: {e}", exc_info=True) - + # Start Redis subscriber in background thread thread = threading.Thread(target=redis_subscriber_thread, daemon=True) thread.start() logger.debug("SSE Redis Subscriber: Background thread started") + async def transform_callback_to_task_format(task_id: str, event_data: dict) -> dict: """Transform callback event data into the task format expected by frontend""" try: # Import here to avoid circular imports - from routes.utils.celery_tasks import get_task_info, get_all_tasks - + from routes.utils.celery_tasks import get_task_info + # Get 
task info to build complete task object task_info = get_task_info(task_id) if not task_info: logger.warning(f"SSE Transform: No task info found for {task_id}") return None - + # Extract callback data - callback_data = event_data.get('callback_data', {}) - + callback_data = event_data.get("callback_data", {}) + # Build task object in the format expected by frontend task_object = { "task_id": task_id, "original_url": f"http://localhost:7171/api/{task_info.get('download_type', 'track')}/download/{task_info.get('url', '').split('/')[-1] if task_info.get('url') else ''}", "last_line": callback_data, # This is what frontend expects for callback data - "timestamp": event_data.get('timestamp', time.time()), - "download_type": task_info.get('download_type', 'track'), - "type": task_info.get('type', task_info.get('download_type', 'track')), - "name": task_info.get('name', 'Unknown'), - "artist": task_info.get('artist', ''), - "created_at": task_info.get('created_at'), + "timestamp": event_data.get("timestamp", time.time()), + "download_type": task_info.get("download_type", "track"), + "type": task_info.get("type", task_info.get("download_type", "track")), + "name": task_info.get("name", "Unknown"), + "artist": task_info.get("artist", ""), + "created_at": task_info.get("created_at"), } - + # Build minimal event data - global counts will be added at broadcast time return { "change_type": "update", # Use "update" so it gets processed by existing frontend logic @@ -176,84 +215,98 @@ async def transform_callback_to_task_format(task_id: str, event_data: dict) -> d "current_timestamp": time.time(), "updated_count": 1, "since_timestamp": time.time(), - "trigger_reason": "callback_update" + "trigger_reason": "callback_update", } - + except Exception as e: - logger.error(f"SSE Transform: Error transforming callback for task {task_id}: {e}", exc_info=True) + logger.error( + f"SSE Transform: Error transforming callback for task {task_id}: {e}", + exc_info=True, + ) return None + # Start the Redis subscriber when module loads start_sse_redis_subscriber() + async def trigger_sse_update(task_id: str, reason: str = "task_update"): """Trigger an immediate SSE update for a specific task""" try: current_time = time.time() - + # Find the specific task that changed task_info = get_task_info(task_id) if not task_info: logger.warning(f"SSE: Task {task_id} not found for update") return - + last_status = get_last_task_status(task_id) - + # Create a dummy request for the _build_task_response function - task_response = _build_task_response(task_info, last_status, task_id, current_time, request=None) - + class DummyRequest: + def __init__(self): + self.base_url = "http://localhost:7171" + + dummy_request = DummyRequest() + task_response = _build_task_response( + task_info, last_status, task_id, current_time, dummy_request + ) + # Create minimal event data - global counts will be added at broadcast time event_data = { "tasks": [task_response], "current_timestamp": current_time, "since_timestamp": current_time, "change_type": "realtime", - "trigger_reason": reason + "trigger_reason": reason, } - + await sse_broadcaster.broadcast_event(event_data) logger.debug(f"SSE: Broadcast update for task {task_id} (reason: {reason})") - + except Exception as e: logger.error(f"SSE: Failed to trigger update for task {task_id}: {e}") + # Define active task states using ProgressState constants ACTIVE_TASK_STATES = { - ProgressState.INITIALIZING, # "initializing" - task is starting up - ProgressState.PROCESSING, # "processing" - task is being 
processed
-    ProgressState.DOWNLOADING,  # "downloading" - actively downloading
-    ProgressState.PROGRESS,  # "progress" - album/playlist progress updates
-    ProgressState.TRACK_PROGRESS,  # "track_progress" - real-time track progress
-    ProgressState.REAL_TIME,  # "real_time" - real-time download progress
-    ProgressState.RETRYING,  # "retrying" - task is retrying after error
-    "real-time",  # "real-time" - real-time download progress (hyphenated version)
-    ProgressState.QUEUED,  # "queued" - task is queued and waiting
-    "pending",  # "pending" - legacy queued status
+    ProgressState.INITIALIZING,  # "initializing" - task is starting up
+    ProgressState.PROCESSING,  # "processing" - task is being processed
+    ProgressState.DOWNLOADING,  # "downloading" - actively downloading
+    ProgressState.PROGRESS,  # "progress" - album/playlist progress updates
+    ProgressState.TRACK_PROGRESS,  # "track_progress" - real-time track progress
+    ProgressState.REAL_TIME,  # "real_time" - real-time download progress
+    ProgressState.RETRYING,  # "retrying" - task is retrying after error
+    "real-time",  # "real-time" - real-time download progress (hyphenated version)
+    ProgressState.QUEUED,  # "queued" - task is queued and waiting
+    "pending",  # "pending" - legacy queued status
 }

 # Define terminal task states that should be included when recently completed
 TERMINAL_TASK_STATES = {
-    ProgressState.COMPLETE,  # "complete" - task completed successfully
-    ProgressState.DONE,  # "done" - task finished processing
-    ProgressState.ERROR,  # "error" - task failed
-    ProgressState.CANCELLED,  # "cancelled" - task was cancelled
-    ProgressState.SKIPPED,  # "skipped" - task was skipped
+    ProgressState.COMPLETE,  # "complete" - task completed successfully
+    ProgressState.DONE,  # "done" - task finished processing
+    ProgressState.ERROR,  # "error" - task failed
+    ProgressState.CANCELLED,  # "cancelled" - task was cancelled
+    ProgressState.SKIPPED,  # "skipped" - task was skipped
 }

+
 def get_task_status_from_last_status(last_status):
     """
     Extract the task status from last_status, checking both possible locations.
     Uses improved priority logic to handle real-time downloads correctly.
-    
+
     Args:
         last_status: The last status dict from get_last_task_status()
-    
+
     Returns:
         str: The task status string
     """
     if not last_status:
         return "unknown"
-    
+
     # For real-time downloads, prioritize status_info.status as it contains the actual progress state
     status_info = last_status.get("status_info", {})
     if isinstance(status_info, dict) and "status" in status_info:
@@ -261,10 +314,10 @@ def get_task_status_from_last_status(last_status):
         # If status_info contains an active status, use it regardless of top-level status
         if status_info_status in ACTIVE_TASK_STATES:
             return status_info_status
-    
+
     # Fall back to top-level status
     top_level_status = last_status.get("status", "unknown")
-    
+
     # If both exist but neither is active, prefer the more recent one (usually top-level)
     # For active states, we already handled status_info above
     return top_level_status
@@ -273,10 +326,10 @@ def get_task_status_from_last_status(last_status):
 def is_task_active(task_status):
     """
     Determine if a task is currently active (working/processing).
-    
+
     Args:
         task_status: The status string from the task
-    
+
     Returns:
         bool: True if the task is active, False otherwise
     """
@@ -289,24 +342,24 @@ def get_global_task_counts():
     """
     Get comprehensive task counts for ALL tasks in Redis.
     This is called right before sending SSE events to ensure accurate counts.
-    
+
     Returns:
         dict: Task counts by status
     """
     task_counts = {
         "active": 0,
-        "queued": 0, 
+        "queued": 0,
         "completed": 0,
         "error": 0,
         "cancelled": 0,
         "retrying": 0,
-        "skipped": 0
+        "skipped": 0,
     }
-    
+
     try:
         # Get ALL tasks from Redis - this is the source of truth
         all_tasks = get_all_tasks()
-        
+
         for task_summary in all_tasks:
             task_id = task_summary.get("task_id")
             if not task_id:
@@ -319,7 +372,7 @@ def get_global_task_counts():
             last_status = get_last_task_status(task_id)
             task_status = get_task_status_from_last_status(last_status)
             is_active_task = is_task_active(task_status)
-            
+
             # Categorize tasks by status using ProgressState constants
             if task_status == ProgressState.RETRYING:
                 task_counts["retrying"] += 1
@@ -335,12 +388,14 @@ def get_global_task_counts():
                 task_counts["skipped"] += 1
             elif is_active_task:
                 task_counts["active"] += 1
-        
-        logger.debug(f"Global task counts: {task_counts} (total: {len(all_tasks)} tasks)")
-        
+
+        logger.debug(
+            f"Global task counts: {task_counts} (total: {len(all_tasks)} tasks)"
+        )
+
     except Exception as e:
         logger.error(f"Error getting global task counts: {e}", exc_info=True)
-    
+
     return task_counts
@@ -348,26 +403,28 @@ def add_global_task_counts_to_event(event_data):
     """
     Add global task counts to any SSE event data right before broadcasting.
     This ensures all SSE events have accurate, up-to-date counts of ALL tasks.
-    
+
     Args:
         event_data: The event data dictionary to be sent via SSE
-    
+
     Returns:
         dict: Enhanced event data with global task counts
     """
     try:
         # Get fresh counts of ALL tasks right before sending
         global_task_counts = get_global_task_counts()
-        
+
         # Add/update the counts in the event data
         event_data["task_counts"] = global_task_counts
         event_data["active_tasks"] = global_task_counts["active"]
         event_data["all_tasks_count"] = sum(global_task_counts.values())
-        
+
         return event_data
-        
+
     except Exception as e:
-        logger.error(f"Error adding global task counts to SSE event: {e}", exc_info=True)
+        logger.error(
+            f"Error adding global task counts to SSE event: {e}", exc_info=True
+        )
         return event_data
@@ -391,10 +448,9 @@ def _build_error_callback_object(last_status):
         callback_object["album"] = {
             "type": "album",
             "title": name,
-            "artists": [{
-                "type": "artistAlbum",
-                "name": artist_or_owner
-            }] if artist_or_owner else [],
+            "artists": [{"type": "artistAlbum", "name": artist_or_owner}]
+            if artist_or_owner
+            else [],
         }
     elif download_type == "playlist":
         playlist_payload = {"type": "playlist", "title": name}
@@ -405,10 +461,9 @@
         callback_object["track"] = {
             "type": "track",
             "title": name,
-            "artists": [{
-                "type": "artistTrack",
-                "name": artist_or_owner
-            }] if artist_or_owner else [],
+            "artists": [{"type": "artistTrack", "name": artist_or_owner}]
+            if artist_or_owner
+            else [],
         }
     else:
         # Fallback for unknown types to avoid breaking the client, returning a basic error structure.
@@ -425,7 +480,9 @@
     return callback_object


-def _build_task_response(task_info, last_status, task_id, current_time, request: Optional[Request] = None):
+def _build_task_response(
+    task_info, last_status, task_id, current_time, request: Request
+):
     """
     Helper function to build a standardized task response object.
     """
@@ -459,6 +516,27 @@ def _build_task_response(task_info, last_status, task_id, current_time, request:
         logger.warning(
             f"Missing download_type ('{download_type}') or item_url ('{item_url}') in task_info for task {task_id}. Falling back for original_url."
) + # Auto-delete faulty task data to keep the queue clean + try: + delete_task_data_and_log( + task_id, + reason="Auto-cleaned: Missing download_type or url in task_info.", + ) + # Trigger SSE so clients refresh their task lists + try: + # Avoid circular import at top-level + import asyncio as _asyncio + + # Fire-and-forget; if no event loop available, ignore + loop = _asyncio.get_event_loop() + if loop.is_running(): + _asyncio.create_task( + trigger_sse_update(task_id, "auto_deleted_faulty") + ) + except Exception: + pass + except Exception as _e: + logger.error(f"Auto-delete failed for faulty task {task_id}: {_e}") original_request_obj = task_info.get("original_request", {}) dynamic_original_url = original_request_obj.get("original_url", "") @@ -472,13 +550,18 @@ def _build_task_response(task_info, last_status, task_id, current_time, request: else: last_line_content = last_status + # Normalize created_at to a numeric timestamp + created_at_value = task_info.get("created_at") + if not isinstance(created_at_value, (int, float)): + created_at_value = current_time + task_response = { "original_url": dynamic_original_url, "last_line": last_line_content, "timestamp": last_status.get("timestamp") if last_status else current_time, "task_id": task_id, "status_count": status_count, - "created_at": task_info.get("created_at"), + "created_at": created_at_value, "name": task_info.get("name"), "artist": task_info.get("artist"), "type": task_info.get("type"), @@ -496,13 +579,13 @@ async def get_paginated_tasks(page=1, limit=20, active_only=False, request: Opti """ try: all_tasks = get_all_tasks() - - # Get global task counts + + # Get global task counts task_counts = get_global_task_counts() - + active_tasks = [] other_tasks = [] - + # Process tasks for pagination and response building for task_summary in all_tasks: task_id = task_summary.get("task_id") @@ -516,17 +599,19 @@ async def get_paginated_tasks(page=1, limit=20, active_only=False, request: Opti last_status = get_last_task_status(task_id) task_status = get_task_status_from_last_status(last_status) is_active_task = is_task_active(task_status) - - task_response = _build_task_response(task_info, last_status, task_id, time.time(), request) - + + task_response = _build_task_response( + task_info, last_status, task_id, time.time(), request + ) + if is_active_task: active_tasks.append(task_response) else: other_tasks.append(task_response) # Sort other tasks by creation time (newest first) - other_tasks.sort(key=lambda x: x.get("created_at", 0), reverse=True) - + other_tasks.sort(key=lambda x: (x.get("created_at") or 0.0), reverse=True) + if active_only: paginated_tasks = active_tasks pagination_info = { @@ -534,49 +619,55 @@ async def get_paginated_tasks(page=1, limit=20, active_only=False, request: Opti "limit": limit, "total_non_active": 0, "has_more": False, - "returned_non_active": 0 + "returned_non_active": 0, } else: # Apply pagination to non-active tasks offset = (page - 1) * limit - paginated_other_tasks = other_tasks[offset:offset + limit] + paginated_other_tasks = other_tasks[offset : offset + limit] paginated_tasks = active_tasks + paginated_other_tasks - + pagination_info = { "page": page, "limit": limit, "total_non_active": len(other_tasks), "has_more": len(other_tasks) > offset + limit, - "returned_non_active": len(paginated_other_tasks) + "returned_non_active": len(paginated_other_tasks), } response = { "tasks": paginated_tasks, "current_timestamp": time.time(), - "total_tasks": task_counts["active"] + task_counts["retrying"], # Only 
active/retrying tasks for counter + "total_tasks": task_counts["active"] + + task_counts["retrying"], # Only active/retrying tasks for counter "all_tasks_count": len(all_tasks), # Total count of all tasks "task_counts": task_counts, # Categorized counts "active_tasks": len(active_tasks), "updated_count": len(paginated_tasks), - "pagination": pagination_info + "pagination": pagination_info, } - + return response - + except Exception as e: logger.error(f"Error in get_paginated_tasks: {e}", exc_info=True) - raise HTTPException(status_code=500, detail={"error": "Failed to retrieve paginated tasks"}) + raise HTTPException( + status_code=500, detail={"error": "Failed to retrieve paginated tasks"} + ) # IMPORTANT: Specific routes MUST come before parameterized routes in FastAPI # Otherwise "updates" gets matched as a {task_id} parameter! + @router.get("/list") -async def list_tasks(request: Request, current_user: User = Depends(require_auth_from_state)): +async def list_tasks( + request: Request, current_user: User = Depends(require_auth_from_state) +): """ Retrieve a paginated list of all tasks in the system. Returns a detailed list of task objects including status and metadata. - + Query parameters: page (int): Page number for pagination (default: 1) limit (int): Number of tasks per page (default: 50, max: 100) @@ -584,25 +675,25 @@ async def list_tasks(request: Request, current_user: User = Depends(require_auth """ try: # Get query parameters - page = int(request.query_params.get('page', 1)) - limit = min(int(request.query_params.get('limit', 50)), 100) # Cap at 100 - active_only = request.query_params.get('active_only', '').lower() == 'true' - + page = int(request.query_params.get("page", 1)) + limit = min(int(request.query_params.get("limit", 50)), 100) # Cap at 100 + active_only = request.query_params.get("active_only", "").lower() == "true" + tasks = get_all_tasks() active_tasks = [] other_tasks = [] - + # Task categorization counters task_counts = { "active": 0, - "queued": 0, + "queued": 0, "completed": 0, "error": 0, "cancelled": 0, "retrying": 0, - "skipped": 0 + "skipped": 0, } - + for task_summary in tasks: task_id = task_summary.get("task_id") if not task_id: @@ -615,11 +706,14 @@ async def list_tasks(request: Request, current_user: User = Depends(require_auth last_status = get_last_task_status(task_id) task_status = get_task_status_from_last_status(last_status) is_active_task = is_task_active(task_status) - + # Categorize tasks by status using ProgressState constants if task_status == ProgressState.RETRYING: task_counts["retrying"] += 1 - elif task_status in {ProgressState.QUEUED, "pending"}: # Keep "pending" for backward compatibility + elif task_status in { + ProgressState.QUEUED, + "pending", + }: # Keep "pending" for backward compatibility task_counts["queued"] += 1 elif task_status in {ProgressState.COMPLETE, ProgressState.DONE}: task_counts["completed"] += 1 @@ -631,17 +725,19 @@ async def list_tasks(request: Request, current_user: User = Depends(require_auth task_counts["skipped"] += 1 elif is_active_task: task_counts["active"] += 1 - - task_response = _build_task_response(task_info, last_status, task_id, time.time(), request) - + + task_response = _build_task_response( + task_info, last_status, task_id, time.time(), request + ) + if is_active_task: active_tasks.append(task_response) else: other_tasks.append(task_response) # Sort other tasks by creation time (newest first) - other_tasks.sort(key=lambda x: x.get("created_at", 0), reverse=True) - + other_tasks.sort(key=lambda 
x: (x.get("created_at") or 0.0), reverse=True) + if active_only: # Return only active tasks without pagination response_tasks = active_tasks @@ -650,17 +746,17 @@ async def list_tasks(request: Request, current_user: User = Depends(require_auth "limit": limit, "total_items": len(active_tasks), "total_pages": 1, - "has_more": False + "has_more": False, } else: # Apply pagination to non-active tasks and combine with active tasks offset = (page - 1) * limit - + # Always include active tasks at the top if page == 1: # For first page, include active tasks + first batch of other tasks available_space = limit - len(active_tasks) - paginated_other_tasks = other_tasks[:max(0, available_space)] + paginated_other_tasks = other_tasks[: max(0, available_space)] response_tasks = active_tasks + paginated_other_tasks else: # For subsequent pages, only include other tasks @@ -668,12 +764,14 @@ async def list_tasks(request: Request, current_user: User = Depends(require_auth adjusted_offset = offset - len(active_tasks) if adjusted_offset < 0: adjusted_offset = 0 - paginated_other_tasks = other_tasks[adjusted_offset:adjusted_offset + limit] + paginated_other_tasks = other_tasks[ + adjusted_offset : adjusted_offset + limit + ] response_tasks = paginated_other_tasks - + total_items = len(active_tasks) + len(other_tasks) total_pages = ((total_items - 1) // limit) + 1 if total_items > 0 else 1 - + pagination_info = { "page": page, "limit": limit, @@ -681,37 +779,42 @@ async def list_tasks(request: Request, current_user: User = Depends(require_auth "total_pages": total_pages, "has_more": page < total_pages, "active_tasks": len(active_tasks), - "total_other_tasks": len(other_tasks) + "total_other_tasks": len(other_tasks), } response = { "tasks": response_tasks, "pagination": pagination_info, - "total_tasks": task_counts["active"] + task_counts["retrying"], # Only active/retrying tasks for counter + "total_tasks": task_counts["active"] + + task_counts["retrying"], # Only active/retrying tasks for counter "all_tasks_count": len(tasks), # Total count of all tasks "task_counts": task_counts, # Categorized counts "active_tasks": len(active_tasks), - "timestamp": time.time() + "timestamp": time.time(), } return response except Exception as e: logger.error(f"Error in /api/prgs/list: {e}", exc_info=True) - raise HTTPException(status_code=500, detail={"error": "Failed to retrieve task list"}) + raise HTTPException( + status_code=500, detail={"error": "Failed to retrieve task list"} + ) @router.get("/updates") -async def get_task_updates(request: Request, current_user: User = Depends(require_auth_from_state)): +async def get_task_updates( + request: Request, current_user: User = Depends(require_auth_from_state) +): """ Retrieve only tasks that have been updated since the specified timestamp. This endpoint is optimized for polling to reduce unnecessary data transfer. - + Query parameters: since (float): Unix timestamp - only return tasks updated after this time page (int): Page number for pagination (default: 1) limit (int): Number of queued/completed tasks per page (default: 20, max: 100) active_only (bool): If true, only return active tasks (downloading, processing, etc.) 
- + Returns: JSON object containing: - tasks: Array of updated task objects @@ -722,31 +825,33 @@ async def get_task_updates(request: Request, current_user: User = Depends(requir """ try: # Get query parameters - since_param = request.query_params.get('since') - page = int(request.query_params.get('page', 1)) - limit = min(int(request.query_params.get('limit', 20)), 100) # Cap at 100 - active_only = request.query_params.get('active_only', '').lower() == 'true' - + since_param = request.query_params.get("since") + page = int(request.query_params.get("page", 1)) + limit = min(int(request.query_params.get("limit", 20)), 100) # Cap at 100 + active_only = request.query_params.get("active_only", "").lower() == "true" + if not since_param: # If no 'since' parameter, return paginated tasks (fallback behavior) response = await get_paginated_tasks(page, limit, active_only, request) return response - + try: since_timestamp = float(since_param) except (ValueError, TypeError): - raise HTTPException(status_code=400, detail={"error": "Invalid 'since' timestamp format"}) - + raise HTTPException( + status_code=400, detail={"error": "Invalid 'since' timestamp format"} + ) + # Get all tasks all_tasks = get_all_tasks() current_time = time.time() - + # Get global task counts task_counts = get_global_task_counts() - + updated_tasks = [] active_tasks = [] - + # Process tasks for filtering and response building for task_summary in all_tasks: task_id = task_summary.get("task_id") @@ -760,19 +865,31 @@ async def get_task_updates(request: Request, current_user: User = Depends(requir last_status = get_last_task_status(task_id) task_status = get_task_status_from_last_status(last_status) is_active_task = is_task_active(task_status) - + # Check if task has been updated since the given timestamp - task_timestamp = last_status.get("timestamp") if last_status else task_info.get("created_at", 0) - + task_timestamp = ( + last_status.get("timestamp") + if last_status + else task_info.get("created_at", 0) + ) + # Always include active tasks in updates, apply filtering to others # Also include recently completed/terminal tasks to ensure "done" status gets sent - is_recently_terminal = task_status in TERMINAL_TASK_STATES and task_timestamp > since_timestamp - should_include = is_active_task or (task_timestamp > since_timestamp and not active_only) or is_recently_terminal - + is_recently_terminal = ( + task_status in TERMINAL_TASK_STATES and task_timestamp > since_timestamp + ) + should_include = ( + is_active_task + or (task_timestamp > since_timestamp and not active_only) + or is_recently_terminal + ) + if should_include: # Construct the same detailed task object as in list_tasks() - task_response = _build_task_response(task_info, last_status, task_id, current_time, request) - + task_response = _build_task_response( + task_info, last_status, task_id, current_time, request + ) + if is_active_task: active_tasks.append(task_response) else: @@ -780,21 +897,26 @@ async def get_task_updates(request: Request, current_user: User = Depends(requir # Apply pagination to non-active tasks offset = (page - 1) * limit - paginated_updated_tasks = updated_tasks[offset:offset + limit] if not active_only else [] - + paginated_updated_tasks = ( + updated_tasks[offset : offset + limit] if not active_only else [] + ) + # Combine active tasks (always shown) with paginated updated tasks all_returned_tasks = active_tasks + paginated_updated_tasks - + # Sort by priority (active first, then by creation time) - all_returned_tasks.sort(key=lambda x: ( - 0 
if x.get("task_id") in [t["task_id"] for t in active_tasks] else 1, - -(x.get("created_at") or 0) - )) + all_returned_tasks.sort( + key=lambda x: ( + 0 if x.get("task_id") in [t["task_id"] for t in active_tasks] else 1, + -(x.get("created_at") or 0), + ) + ) response = { "tasks": all_returned_tasks, "current_timestamp": current_time, - "total_tasks": task_counts["active"] + task_counts["retrying"], # Only active/retrying tasks for counter + "total_tasks": task_counts["active"] + + task_counts["retrying"], # Only active/retrying tasks for counter "all_tasks_count": len(all_tasks), # Total count of all tasks "task_counts": task_counts, # Categorized counts "active_tasks": len(active_tasks), @@ -805,18 +927,22 @@ async def get_task_updates(request: Request, current_user: User = Depends(requir "limit": limit, "total_non_active": len(updated_tasks), "has_more": len(updated_tasks) > offset + limit, - "returned_non_active": len(paginated_updated_tasks) - } + "returned_non_active": len(paginated_updated_tasks), + }, } - - logger.debug(f"Returning {len(active_tasks)} active + {len(paginated_updated_tasks)} paginated tasks out of {len(all_tasks)} total") + + logger.debug( + f"Returning {len(active_tasks)} active + {len(paginated_updated_tasks)} paginated tasks out of {len(all_tasks)} total" + ) return response - + except HTTPException: raise except Exception as e: logger.error(f"Error in /api/prgs/updates: {e}", exc_info=True) - raise HTTPException(status_code=500, detail={"error": "Failed to retrieve task updates"}) + raise HTTPException( + status_code=500, detail={"error": "Failed to retrieve task updates"} + ) @router.post("/cancel/all") @@ -849,11 +975,15 @@ async def cancel_all_tasks(current_user: User = Depends(require_auth_from_state) return response except Exception as e: logger.error(f"Error in /api/prgs/cancel/all: {e}", exc_info=True) - raise HTTPException(status_code=500, detail={"error": "Failed to cancel all tasks"}) + raise HTTPException( + status_code=500, detail={"error": "Failed to cancel all tasks"} + ) @router.post("/cancel/{task_id}") -async def cancel_task_endpoint(task_id: str, current_user: User = Depends(require_auth_from_state)): +async def cancel_task_endpoint( + task_id: str, current_user: User = Depends(require_auth_from_state) +): """ Cancel a running or queued task. @@ -882,7 +1012,7 @@ async def cancel_task_endpoint(task_id: str, current_user: User = Depends(requir detail={ "status": "error", "message": "Cancellation for old system is not supported in the new API. Please use the new task ID format.", - } + }, ) except HTTPException: raise @@ -891,7 +1021,9 @@ async def cancel_task_endpoint(task_id: str, current_user: User = Depends(requir @router.delete("/delete/{task_id}") -async def delete_task(task_id: str, current_user: User = Depends(require_auth_from_state)): +async def delete_task( + task_id: str, current_user: User = Depends(require_auth_from_state) +): """ Delete a task's information and history. @@ -910,26 +1042,28 @@ async def delete_task(task_id: str, current_user: User = Depends(require_auth_fr @router.get("/stream") -async def stream_task_updates(request: Request, current_user: User = Depends(get_current_user_from_state)): +async def stream_task_updates( + request: Request, current_user: User = Depends(get_current_user_from_state) +): """ Stream real-time task updates via Server-Sent Events (SSE). Now uses event-driven architecture for true real-time updates. Uses optional authentication to avoid breaking SSE connections. 
- + Query parameters: active_only (bool): If true, only stream active tasks (downloading, processing, etc.) - + Returns: Server-Sent Events stream with task update data in JSON format """ - + # Get query parameters - active_only = request.query_params.get('active_only', '').lower() == 'true' - + active_only = request.query_params.get("active_only", "").lower() == "true" + async def event_generator(): # Create a queue for this client client_queue = asyncio.Queue() - + try: # Register this client with the broadcaster logger.debug(f"SSE Stream: New client connecting...") @@ -937,25 +1071,29 @@ async def stream_task_updates(request: Request, current_user: User = Depends(get logger.debug(f"SSE Stream: Client registered successfully, total clients: {len(sse_broadcaster.clients)}") # Send initial data immediately upon connection - initial_data = await generate_task_update_event(time.time(), active_only, request) + initial_data = await generate_task_update_event( + time.time(), active_only, request + ) yield initial_data - + # Also send any active tasks as callback-style events to newly connected clients all_tasks = get_all_tasks() for task_summary in all_tasks: task_id = task_summary.get("task_id") if not task_id: continue - + task_info = get_task_info(task_id) if not task_info: continue - + last_status = get_last_task_status(task_id) task_status = get_task_status_from_last_status(last_status) - + # Send recent callback data for active or recently completed tasks - if is_task_active(task_status) or (last_status and last_status.get("timestamp", 0) > time.time() - 30): + if is_task_active(task_status) or ( + last_status and last_status.get("timestamp", 0) > time.time() - 30 + ): if last_status and "raw_callback" in last_status: callback_event = { "task_id": task_id, @@ -963,7 +1101,7 @@ async def stream_task_updates(request: Request, current_user: User = Depends(get "timestamp": last_status.get("timestamp", time.time()), "change_type": "callback", "event_type": "progress_update", - "replay": True # Mark as replay for client + "replay": True, # Mark as replay for client } event_json = json.dumps(callback_event) yield f"data: {event_json}\n\n" @@ -972,12 +1110,14 @@ async def stream_task_updates(request: Request, current_user: User = Depends(get # Send periodic heartbeats and listen for real-time events last_heartbeat = time.time() heartbeat_interval = 30.0 - + while True: try: # Wait for either an event or timeout for heartbeat try: - event_data = await asyncio.wait_for(client_queue.get(), timeout=heartbeat_interval) + event_data = await asyncio.wait_for( + client_queue.get(), timeout=heartbeat_interval + ) # Send the real-time event yield event_data last_heartbeat = time.time() @@ -987,8 +1127,16 @@ async def stream_task_updates(request: Request, current_user: User = Depends(get if current_time - last_heartbeat >= heartbeat_interval: # Generate current task counts for heartbeat all_tasks = get_all_tasks() - task_counts = {"active": 0, "queued": 0, "completed": 0, "error": 0, "cancelled": 0, "retrying": 0, "skipped": 0} - + task_counts = { + "active": 0, + "queued": 0, + "completed": 0, + "error": 0, + "cancelled": 0, + "retrying": 0, + "skipped": 0, + } + for task_summary in all_tasks: task_id = task_summary.get("task_id") if not task_id: @@ -997,13 +1145,18 @@ async def stream_task_updates(request: Request, current_user: User = Depends(get if not task_info: continue last_status = get_last_task_status(task_id) - task_status = get_task_status_from_last_status(last_status) - + task_status = 
get_task_status_from_last_status( + last_status + ) + if task_status == ProgressState.RETRYING: task_counts["retrying"] += 1 elif task_status in {ProgressState.QUEUED, "pending"}: task_counts["queued"] += 1 - elif task_status in {ProgressState.COMPLETE, ProgressState.DONE}: + elif task_status in { + ProgressState.COMPLETE, + ProgressState.DONE, + }: task_counts["completed"] += 1 elif task_status == ProgressState.ERROR: task_counts["error"] += 1 @@ -1013,25 +1166,32 @@ async def stream_task_updates(request: Request, current_user: User = Depends(get task_counts["skipped"] += 1 elif is_task_active(task_status): task_counts["active"] += 1 - + heartbeat_data = { "current_timestamp": current_time, - "total_tasks": task_counts["active"] + task_counts["retrying"], + "total_tasks": task_counts["active"] + + task_counts["retrying"], "task_counts": task_counts, - "change_type": "heartbeat" + "change_type": "heartbeat", } - + event_json = json.dumps(heartbeat_data) yield f"data: {event_json}\n\n" last_heartbeat = current_time - + except Exception as e: logger.error(f"Error in SSE event streaming: {e}", exc_info=True) # Send error event and continue - error_data = json.dumps({"error": "Internal server error", "timestamp": time.time(), "change_type": "error"}) + error_data = json.dumps( + { + "error": "Internal server error", + "timestamp": time.time(), + "change_type": "error", + } + ) yield f"data: {error_data}\n\n" await asyncio.sleep(1) - + except asyncio.CancelledError: logger.debug("SSE client disconnected") return @@ -1050,12 +1210,14 @@ async def stream_task_updates(request: Request, current_user: User = Depends(get "Connection": "keep-alive", "Content-Type": "text/event-stream", "Access-Control-Allow-Origin": "*", - "Access-Control-Allow-Headers": "Cache-Control" - } + "Access-Control-Allow-Headers": "Cache-Control", + }, ) -async def generate_task_update_event(since_timestamp: float, active_only: bool, request: Request) -> str: +async def generate_task_update_event( + since_timestamp: float, active_only: bool, request: Request +) -> str: """ Generate initial task update event for SSE connection. This replicates the logic from get_task_updates but for SSE format. 
@@ -1064,10 +1226,10 @@ async def generate_task_update_event(since_timestamp: float, active_only: bool, # Get all tasks for filtering all_tasks = get_all_tasks() current_time = time.time() - + updated_tasks = [] active_tasks = [] - + # Process tasks for filtering only - no counting here for task_summary in all_tasks: task_id = task_summary.get("task_id") @@ -1081,19 +1243,31 @@ async def generate_task_update_event(since_timestamp: float, active_only: bool, last_status = get_last_task_status(task_id) task_status = get_task_status_from_last_status(last_status) is_active_task = is_task_active(task_status) - + # Check if task has been updated since the given timestamp - task_timestamp = last_status.get("timestamp") if last_status else task_info.get("created_at", 0) - + task_timestamp = ( + last_status.get("timestamp") + if last_status + else task_info.get("created_at", 0) + ) + # Always include active tasks in updates, apply filtering to others # Also include recently completed/terminal tasks to ensure "done" status gets sent - is_recently_terminal = task_status in TERMINAL_TASK_STATES and task_timestamp > since_timestamp - should_include = is_active_task or (task_timestamp > since_timestamp and not active_only) or is_recently_terminal - + is_recently_terminal = ( + task_status in TERMINAL_TASK_STATES and task_timestamp > since_timestamp + ) + should_include = ( + is_active_task + or (task_timestamp > since_timestamp and not active_only) + or is_recently_terminal + ) + if should_include: # Construct the same detailed task object as in updates endpoint - task_response = _build_task_response(task_info, last_status, task_id, current_time, request) - + task_response = _build_task_response( + task_info, last_status, task_id, current_time, request + ) + if is_active_task: active_tasks.append(task_response) else: @@ -1101,37 +1275,45 @@ async def generate_task_update_event(since_timestamp: float, active_only: bool, # Combine active tasks (always shown) with updated tasks all_returned_tasks = active_tasks + updated_tasks - + # Sort by priority (active first, then by creation time) - all_returned_tasks.sort(key=lambda x: ( - 0 if x.get("task_id") in [t["task_id"] for t in active_tasks] else 1, - -(x.get("created_at") or 0) - )) + all_returned_tasks.sort( + key=lambda x: ( + 0 if x.get("task_id") in [t["task_id"] for t in active_tasks] else 1, + -(x.get("created_at") or 0), + ) + ) initial_data = { "tasks": all_returned_tasks, "current_timestamp": current_time, "updated_count": len(updated_tasks), "since_timestamp": since_timestamp, - "initial": True # Mark as initial load + "initial": True, # Mark as initial load } - + # Add global task counts since this bypasses the broadcaster enhanced_data = add_global_task_counts_to_event(initial_data) - + event_data = json.dumps(enhanced_data) return f"data: {event_data}\n\n" - + except Exception as e: logger.error(f"Error generating initial SSE event: {e}", exc_info=True) - error_data = json.dumps({"error": "Failed to load initial data", "timestamp": time.time()}) + error_data = json.dumps( + {"error": "Failed to load initial data", "timestamp": time.time()} + ) return f"data: {error_data}\n\n" # IMPORTANT: This parameterized route MUST come AFTER all specific routes # Otherwise FastAPI will match specific routes like "/updates" as task_id parameters @router.get("/{task_id}") -async def get_task_details(task_id: str, request: Request, current_user: User = Depends(require_auth_from_state)): +async def get_task_details( + task_id: str, + request: Request, + 
current_user: User = Depends(require_auth_from_state), +): """ Return a JSON object with the resource type, its name (title), the last progress update, and, if available, the original request parameters. diff --git a/routes/utils/album.py b/routes/utils/album.py index be67078..b6fb6e5 100755 --- a/routes/utils/album.py +++ b/routes/utils/album.py @@ -101,7 +101,7 @@ def download_album( ) dl.download_albumspo( link_album=url, # Spotify URL - output_dir="./downloads", + output_dir="/app/downloads", quality_download=quality, # Deezer quality recursive_quality=recursive_quality, recursive_download=False, @@ -159,7 +159,7 @@ def download_album( ) spo.download_album( link_album=url, # Spotify URL - output_dir="./downloads", + output_dir="/app/downloads", quality_download=fall_quality, # Spotify quality recursive_quality=recursive_quality, recursive_download=False, @@ -216,7 +216,7 @@ def download_album( ) spo.download_album( link_album=url, - output_dir="./downloads", + output_dir="/app/downloads", quality_download=quality, recursive_quality=recursive_quality, recursive_download=False, @@ -260,7 +260,7 @@ def download_album( ) dl.download_albumdee( # Deezer URL, download via Deezer link_album=url, - output_dir="./downloads", + output_dir="/app/downloads", quality_download=quality, recursive_quality=recursive_quality, recursive_download=False, diff --git a/routes/utils/celery_config.py b/routes/utils/celery_config.py index 70fd3a3..7cd852b 100644 --- a/routes/utils/celery_config.py +++ b/routes/utils/celery_config.py @@ -28,6 +28,7 @@ CONFIG_FILE_PATH = Path("./data/config/main.json") DEFAULT_MAIN_CONFIG = { "service": "spotify", + "version": "3.3.0", "spotify": "", "deezer": "", "fallback": False, diff --git a/routes/utils/celery_manager.py b/routes/utils/celery_manager.py index 9ce27b4..d26d291 100644 --- a/routes/utils/celery_manager.py +++ b/routes/utils/celery_manager.py @@ -3,6 +3,7 @@ import logging import time import threading import os +import sys # Import Celery task utilities from .celery_config import get_config_params, MAX_CONCURRENT_DL @@ -49,6 +50,8 @@ class CeleryManager: # %h is replaced by celery with the actual hostname. 
hostname = f"worker_{worker_name_suffix}@%h" command = [ + sys.executable, + "-m", "celery", "-A", self.app_name, @@ -76,11 +79,14 @@ class CeleryManager: log_method = logger.info # Default log method if error: # This is a stderr stream - if " - ERROR - " in line_stripped or " - CRITICAL - " in line_stripped: + if ( + " - ERROR - " in line_stripped + or " - CRITICAL - " in line_stripped + ): log_method = logger.error elif " - WARNING - " in line_stripped: log_method = logger.warning - + log_method(f"{log_prefix}: {line_stripped}") elif ( self.stop_event.is_set() @@ -155,7 +161,8 @@ class CeleryManager: queues="utility_tasks,default", # Listen to utility and default concurrency=5, # Increased concurrency for SSE updates and utility tasks worker_name_suffix="utw", # Utility Worker - log_level_env=os.getenv("LOG_LEVEL", "WARNING").upper(), + log_level_env=os.getenv("LOG_LEVEL", "ERROR").upper(), + ) logger.info( f"Starting Celery Utility Worker with command: {' '.join(utility_cmd)}" diff --git a/routes/utils/playlist.py b/routes/utils/playlist.py index efdec27..b19bd7c 100755 --- a/routes/utils/playlist.py +++ b/routes/utils/playlist.py @@ -98,7 +98,7 @@ def download_playlist( ) dl.download_playlistspo( link_playlist=url, # Spotify URL - output_dir="./downloads", + output_dir="/app/downloads", quality_download=quality, # Deezer quality recursive_quality=recursive_quality, recursive_download=False, @@ -161,7 +161,7 @@ def download_playlist( ) spo.download_playlist( link_playlist=url, # Spotify URL - output_dir="./downloads", + output_dir="/app/downloads", quality_download=fall_quality, # Spotify quality recursive_quality=recursive_quality, recursive_download=False, @@ -224,7 +224,7 @@ def download_playlist( ) spo.download_playlist( link_playlist=url, - output_dir="./downloads", + output_dir="/app/downloads", quality_download=quality, recursive_quality=recursive_quality, recursive_download=False, @@ -268,7 +268,7 @@ def download_playlist( ) dl.download_playlistdee( # Deezer URL, download via Deezer link_playlist=url, - output_dir="./downloads", + output_dir="/app/downloads", quality_download=quality, recursive_quality=recursive_quality, # Usually False for playlists to get individual track qualities recursive_download=False, diff --git a/routes/utils/track.py b/routes/utils/track.py index e1f8b4a..7499d31 100755 --- a/routes/utils/track.py +++ b/routes/utils/track.py @@ -94,7 +94,7 @@ def download_track( # download_trackspo means: Spotify URL, download via Deezer dl.download_trackspo( link_track=url, # Spotify URL - output_dir="./downloads", + output_dir="/app/downloads", quality_download=quality, # Deezer quality recursive_quality=recursive_quality, recursive_download=False, @@ -153,7 +153,7 @@ def download_track( ) spo.download_track( link_track=url, # Spotify URL - output_dir="./downloads", + output_dir="/app/downloads", quality_download=fall_quality, # Spotify quality recursive_quality=recursive_quality, recursive_download=False, @@ -169,7 +169,7 @@ def download_track( convert_to=convert_to, bitrate=bitrate, artist_separator=artist_separator, - real_time_multiplier=real_time_multiplier, + spotify_metadata=spotify_metadata, pad_number_width=pad_number_width, ) print( @@ -211,7 +211,7 @@ def download_track( ) spo.download_track( link_track=url, - output_dir="./downloads", + output_dir="/app/downloads", quality_download=quality, recursive_quality=recursive_quality, recursive_download=False, @@ -254,7 +254,7 @@ def download_track( ) dl.download_trackdee( # Deezer URL, download via Deezer 
link_track=url, - output_dir="./downloads", + output_dir="/app/downloads", quality_download=quality, recursive_quality=recursive_quality, recursive_download=False, diff --git a/routes/utils/watch/manager.py b/routes/utils/watch/manager.py index 4a10e78..a4e7aa6 100644 --- a/routes/utils/watch/manager.py +++ b/routes/utils/watch/manager.py @@ -1098,7 +1098,7 @@ def update_playlist_m3u_file(playlist_spotify_id: str): # Get configuration settings output_dir = ( - "./downloads" # This matches the output_dir used in download functions + "/app/downloads" # This matches the output_dir used in download functions ) # Get all tracks for the playlist @@ -1125,14 +1125,14 @@ def update_playlist_m3u_file(playlist_spotify_id: str): skipped_missing_final_path = 0 for track in tracks: - # Use final_path from deezspot summary and convert from ./downloads to ../ relative path + # Use final_path from deezspot summary and convert from /app/downloads to ../ relative path final_path = track.get("final_path") if not final_path: skipped_missing_final_path += 1 continue normalized = str(final_path).replace("\\", "/") - if normalized.startswith("./downloads/"): - relative_path = normalized.replace("./downloads/", "../", 1) + if normalized.startswith("/app/downloads/"): + relative_path = normalized.replace("/app/downloads/", "../", 1) elif "/downloads/" in normalized.lower(): idx = normalized.lower().rfind("/downloads/") relative_path = "../" + normalized[idx + len("/downloads/") :] diff --git a/spotizerr-ui/package.json b/spotizerr-ui/package.json index 1473397..66fe2e1 100644 --- a/spotizerr-ui/package.json +++ b/spotizerr-ui/package.json @@ -1,7 +1,7 @@ { "name": "spotizerr-ui", "private": true, - "version": "3.2.1", + "version": "3.3.0", "type": "module", "scripts": { "dev": "vite", diff --git a/spotizerr-ui/src/components/SearchResultCard.tsx b/spotizerr-ui/src/components/SearchResultCard.tsx index 7522bcd..9add427 100644 --- a/spotizerr-ui/src/components/SearchResultCard.tsx +++ b/spotizerr-ui/src/components/SearchResultCard.tsx @@ -49,7 +49,7 @@ export const SearchResultCard = ({ id, name, subtitle, imageUrl, type, onDownloa { toast.success("Account added successfully!"); queryClient.invalidateQueries({ queryKey: ["credentials", activeService] }); - queryClient.invalidateQueries({ queryKey: ["config"] }); // Invalidate config to update active Spotify account in UI + queryClient.invalidateQueries({ queryKey: ["config"] }); // Invalidate config to update active Spotify/Deezer account in UI setIsAdding(false); setSubmitError(null); reset(); diff --git a/spotizerr-ui/src/routes/history.tsx b/spotizerr-ui/src/routes/history.tsx index 7effd19..70a69a0 100644 --- a/spotizerr-ui/src/routes/history.tsx +++ b/spotizerr-ui/src/routes/history.tsx @@ -166,18 +166,32 @@ export const History = () => { cell: (info) => { const entry = info.row.original; const isChild = "album_title" in entry; - return isChild ? ( + const historyEntry = entry as HistoryEntry; + const spotifyId = historyEntry.external_ids?.spotify; + const downloadType = historyEntry.download_type; + + const titleContent = isChild ? ( └─ {entry.title} ) : ( {entry.title} - {(entry as HistoryEntry).children_table && ( + {historyEntry.children_table && ( - {(entry as HistoryEntry).total_tracks || "?"} tracks + {historyEntry.total_tracks || "?"} tracks )} ); + + if (!isChild && spotifyId && downloadType) { + return ( + + {titleContent} + + ); + } + + return titleContent; }, }), columnHelper.accessor("artists", {