Merge pull request #311 from Phlogi/my-playlists-3

enhance global logging level
Spotizerr
2025-08-23 21:55:31 -06:00
committed by GitHub
6 changed files with 109 additions and 39 deletions

View File

@@ -60,7 +60,7 @@ Access logs via Docker:
docker logs spotizerr
```
**Log Locations:**
**Log and File Locations:**
- Application Logs: `docker logs spotizerr` (main app and Celery workers)
- Individual Task Logs: `./logs/tasks/` (inside container, maps to your volume)
- Credentials: `./data/creds/`
@@ -70,6 +70,12 @@ docker logs spotizerr
- Download History Database: `./data/history/`
- Spotify Token Cache: `./.cache/` (if `SPOTIPY_CACHE_PATH` is mapped)
**Global Logging Level:**
The application's global logging level can be controlled via the `LOG_LEVEL` environment variable.
Supported values (case-insensitive): `CRITICAL`, `ERROR`, `WARNING`, `INFO`, `DEBUG`, `NOTSET`.
If not set, the default logging level is `WARNING`.
Example in `.env` file: `LOG_LEVEL=DEBUG`
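For container deployments the variable can also be passed at run time. A minimal sketch (the image name, container name, and port mapping here are assumptions; adjust to your deployment):
```
# Run Spotizerr with DEBUG logging enabled
# (assumes an image tagged "spotizerr" and the default port 7171)
docker run -d --name spotizerr -e LOG_LEVEL=DEBUG -p 7171:7171 spotizerr
```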
## 🤝 Contributing
1. Fork the repository
@@ -77,6 +83,21 @@ docker logs spotizerr
3. Make your changes
4. Submit a pull request
## 💻 Development Setup
To run Spotizerr in development mode:
1. **Backend (API):**
* Ensure Python dependencies are installed (e.g., using `uv pip install -r requirements.txt`).
* Start a Redis server.
* Run the app inside your activated virtual env: `python3 app.py` (see the combined sketch after this list).
2. **Frontend (UI):**
* Navigate to `spotizerr-ui/`.
* Install dependencies: `pnpm install`.
* Start the development server: `pnpm dev`.
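Put together, a development session might look like this shell sketch (the command order follows the steps above; the background Redis invocation is an assumption, any reachable Redis instance works):
```
# Backend: install dependencies, start Redis, run the API
# (run inside an activated virtual environment)
uv pip install -r requirements.txt
redis-server &   # assumption: a locally installed Redis; any running instance works
python3 app.py

# Frontend: in a second terminal
cd spotizerr-ui
pnpm install
pnpm dev
```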
## 📄 License
This project is licensed under the GPL. See the [LICENSE](LICENSE) file for details.

app.py
View File

@@ -13,6 +13,34 @@ import redis
import socket
from urllib.parse import urlparse
# Define a mapping from string log levels to logging constants
LOG_LEVELS = {
"CRITICAL": logging.CRITICAL,
"ERROR": logging.ERROR,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
"NOTSET": logging.NOTSET,
}
# Run DB migrations as early as possible, before importing any routers that may touch DBs
try:
from routes.migrations import run_migrations_if_needed
run_migrations_if_needed()
logging.getLogger(__name__).info(
"Database migrations executed (if needed) early in startup."
)
except Exception as e:
logging.getLogger(__name__).error(
f"Database migration step failed early in startup: {e}", exc_info=True
)
sys.exit(1)
# Get log level from environment variable, default to WARNING
log_level_str = os.getenv("LOG_LEVEL", "WARNING").upper()
log_level = LOG_LEVELS.get(log_level_str, logging.WARNING)
# Apply process umask from environment as early as possible
_umask_value = os.getenv("UMASK")
if _umask_value:
@@ -22,7 +50,32 @@ if _umask_value:
# Defer logging setup; avoid failing on invalid UMASK
pass
# Import and initialize routes (this will start the watch manager)
from routes.auth.credentials import router as credentials_router
from routes.auth.auth import router as auth_router
from routes.content.album import router as album_router
from routes.content.artist import router as artist_router
from routes.content.track import router as track_router
from routes.content.playlist import router as playlist_router
from routes.content.bulk_add import router as bulk_add_router
from routes.core.search import router as search_router
from routes.core.history import router as history_router
from routes.system.progress import router as prgs_router
from routes.system.config import router as config_router
# Import Celery configuration and manager
from routes.utils.celery_manager import celery_manager
from routes.utils.celery_config import REDIS_URL
# Import authentication system
from routes.auth import AUTH_ENABLED
from routes.auth.middleware import AuthMiddleware
# Import watch manager controls (start/stop) without triggering side effects
from routes.utils.watch.manager import start_watch_manager, stop_watch_manager
# Configure application-wide logging
@@ -48,7 +101,7 @@ def setup_logging():
# Configure root logger
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
root_logger.setLevel(log_level)
# Clear any existing handlers from the root logger
if root_logger.hasHandlers():
@@ -65,12 +118,12 @@ def setup_logging():
main_log, maxBytes=10 * 1024 * 1024, backupCount=5, encoding="utf-8"
)
file_handler.setFormatter(log_format)
file_handler.setLevel(logging.INFO)
file_handler.setLevel(log_level)
# Console handler for stderr
console_handler = logging.StreamHandler(sys.stderr)
console_handler.setFormatter(log_format)
console_handler.setLevel(logging.INFO)
console_handler.setLevel(log_level)
# Add handlers to root logger
root_logger.addHandler(file_handler)
@@ -83,10 +136,15 @@ def setup_logging():
"routes.utils.celery_manager",
"routes.utils.celery_tasks",
"routes.utils.watch",
"uvicorn", # General Uvicorn logger
"uvicorn.access", # Uvicorn access logs
"uvicorn.error", # Uvicorn error logs
]:
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
logger.propagate = True # Propagate to root logger
logger.setLevel(log_level)
# For uvicorn.access, we explicitly set propagate to False to prevent duplicate logging
# if access_log=False is used in uvicorn.run, and to ensure our middleware handles it.
logger.propagate = False if logger_name == "uvicorn.access" else True
logging.info("Logging system initialized")
@@ -386,4 +444,4 @@ if __name__ == "__main__":
except ValueError:
port = 7171
uvicorn.run(app, host=host, port=port, log_level="info", access_log=True)
uvicorn.run(app, host=host, port=port, log_level=log_level_str.lower(), access_log=False)

View File

@@ -1,7 +1,3 @@
import logging
# Configure basic logging for the application if not already configured
# This remains safe to execute on import
logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger(__name__)

View File

@@ -41,12 +41,12 @@ class SSEBroadcaster:
async def add_client(self, queue: asyncio.Queue):
"""Add a new SSE client"""
self.clients.add(queue)
logger.info(f"SSE: Client connected (total: {len(self.clients)})")
logger.debug(f"SSE: Client connected (total: {len(self.clients)})")
async def remove_client(self, queue: asyncio.Queue):
"""Remove an SSE client"""
self.clients.discard(queue)
logger.info(f"SSE: Client disconnected (total: {len(self.clients)})")
logger.debug(f"SSE: Client disconnected (total: {len(self.clients)})")
async def broadcast_event(self, event_data: dict):
"""Broadcast an event to all connected clients"""
@@ -82,7 +82,6 @@ class SSEBroadcaster:
# Clean up disconnected clients
for client in disconnected:
self.clients.discard(client)
logger.info(
f"SSE Broadcaster: Successfully sent to {sent_count} clients, removed {len(disconnected)} disconnected clients"
)
@@ -178,7 +177,7 @@ def start_sse_redis_subscriber():
# Start Redis subscriber in background thread
thread = threading.Thread(target=redis_subscriber_thread, daemon=True)
thread.start()
logger.info("SSE Redis Subscriber: Background thread started")
logger.debug("SSE Redis Subscriber: Background thread started")
async def transform_callback_to_task_format(task_id: str, event_data: dict) -> dict:
@@ -496,7 +495,7 @@ def _build_task_response(
try:
item_id = item_url.split("/")[-1]
if item_id:
base_url = str(request.base_url).rstrip("/")
base_url = str(request.base_url).rstrip("/") if request else "http://localhost:7171"
dynamic_original_url = (
f"{base_url}/api/{download_type}/download/{item_id}"
)
@@ -574,9 +573,7 @@ def _build_task_response(
return task_response
async def get_paginated_tasks(
page=1, limit=20, active_only=False, request: Request = None
):
async def get_paginated_tasks(page=1, limit=20, active_only=False, request: Optional[Request] = None):
"""
Get paginated list of tasks.
"""
@@ -1069,11 +1066,9 @@ async def stream_task_updates(
try:
# Register this client with the broadcaster
logger.info("SSE Stream: New client connecting...")
logger.debug(f"SSE Stream: New client connecting...")
await sse_broadcaster.add_client(client_queue)
logger.info(
f"SSE Stream: Client registered successfully, total clients: {len(sse_broadcaster.clients)}"
)
logger.debug(f"SSE Stream: Client registered successfully, total clients: {len(sse_broadcaster.clients)}")
# Send initial data immediately upon connection
initial_data = await generate_task_update_event(
@@ -1110,9 +1105,7 @@ async def stream_task_updates(
}
event_json = json.dumps(callback_event)
yield f"data: {event_json}\n\n"
logger.info(
f"SSE Stream: Sent replay callback for task {task_id}"
)
logger.debug(f"SSE Stream: Sent replay callback for task {task_id}")
# Send periodic heartbeats and listen for real-time events
last_heartbeat = time.time()
@@ -1200,7 +1193,7 @@ async def stream_task_updates(
await asyncio.sleep(1)
except asyncio.CancelledError:
logger.info("SSE client disconnected")
logger.debug("SSE client disconnected")
return
except Exception as e:
logger.error(f"SSE connection error: {e}", exc_info=True)

View File

@@ -2,6 +2,7 @@ import subprocess
import logging
import time
import threading
import os
import sys
# Import Celery task utilities
@@ -41,8 +42,10 @@ class CeleryManager:
)
def _get_worker_command(
self, queues, concurrency, worker_name_suffix, log_level="INFO"
self, queues, concurrency, worker_name_suffix, log_level_env=None
):
# Use the explicitly provided level if given; otherwise read LOG_LEVEL from the environment (default WARNING)
log_level = log_level_env if log_level_env else os.getenv("LOG_LEVEL", "WARNING").upper()
# Use a unique worker name to avoid conflicts.
# %h is replaced by celery with the actual hostname.
hostname = f"worker_{worker_name_suffix}@%h"
@@ -123,6 +126,7 @@ class CeleryManager:
queues="downloads",
concurrency=self.concurrency,
worker_name_suffix="dlw", # Download Worker
log_level_env=os.getenv("LOG_LEVEL", "WARNING").upper(),
)
logger.info(
f"Starting Celery Download Worker with command: {' '.join(download_cmd)}"
@@ -157,7 +161,8 @@ class CeleryManager:
queues="utility_tasks,default", # Listen to utility and default
concurrency=5, # Increased concurrency for SSE updates and utility tasks
worker_name_suffix="utw", # Utility Worker
log_level="ERROR", # Reduce log verbosity for utility worker (only errors)
log_level_env=os.getenv("LOG_LEVEL", "ERROR").upper(),
)
logger.info(
f"Starting Celery Utility Worker with command: {' '.join(utility_cmd)}"
@@ -256,7 +261,7 @@ class CeleryManager:
# Restart only the download worker
download_cmd = self._get_worker_command(
"downloads", self.concurrency, "dlw"
"downloads", self.concurrency, "dlw", log_level_env=os.getenv("LOG_LEVEL", "WARNING").upper()
)
logger.info(
f"Restarting Celery Download Worker with command: {' '.join(download_cmd)}"
@@ -372,10 +377,7 @@ celery_manager = CeleryManager()
# Example of how to use the manager (typically called from your main app script)
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
)
# Removed logging.basicConfig as it's handled by the main app's setup_logging
logger.info("Starting Celery Manager example...")
celery_manager.start()
try:

View File

@@ -246,7 +246,7 @@ class CeleryDownloadQueueManager:
"""Initialize the Celery-based download queue manager"""
self.max_concurrent = MAX_CONCURRENT_DL
self.paused = False
print(
logger.info(
f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}"
)