Merge pull request #311 from Phlogi/my-playlists-3
enhance global logging level
This commit is contained in:
23
README.md
23
README.md
@@ -60,7 +60,7 @@ Access logs via Docker:
|
|||||||
docker logs spotizerr
|
docker logs spotizerr
|
||||||
```
|
```
|
||||||
|
|
||||||
**Log Locations:**
|
**Log and File Locations:**
|
||||||
- Application Logs: `docker logs spotizerr` (main app and Celery workers)
|
- Application Logs: `docker logs spotizerr` (main app and Celery workers)
|
||||||
- Individual Task Logs: `./logs/tasks/` (inside container, maps to your volume)
|
- Individual Task Logs: `./logs/tasks/` (inside container, maps to your volume)
|
||||||
- Credentials: `./data/creds/`
|
- Credentials: `./data/creds/`
|
||||||
@@ -70,6 +70,12 @@ docker logs spotizerr
|
|||||||
- Download History Database: `./data/history/`
|
- Download History Database: `./data/history/`
|
||||||
- Spotify Token Cache: `./.cache/` (if `SPOTIPY_CACHE_PATH` is mapped)
|
- Spotify Token Cache: `./.cache/` (if `SPOTIPY_CACHE_PATH` is mapped)
|
||||||
|
|
||||||
|
**Global Logging Level:**
|
||||||
|
The application's global logging level can be controlled via the `LOG_LEVEL` environment variable.
|
||||||
|
Supported values (case-insensitive): `CRITICAL`, `ERROR`, `WARNING`, `INFO`, `DEBUG`, `NOTSET`.
|
||||||
|
If not set, the default logging level is `WARNING`.
|
||||||
|
Example in `.env` file: `LOG_LEVEL=DEBUG`
|
||||||
|
|
||||||
## 🤝 Contributing
|
## 🤝 Contributing
|
||||||
|
|
||||||
1. Fork the repository
|
1. Fork the repository
|
||||||
@@ -77,6 +83,21 @@ docker logs spotizerr
|
|||||||
3. Make your changes
|
3. Make your changes
|
||||||
4. Submit a pull request
|
4. Submit a pull request
|
||||||
|
|
||||||
|
Here is the text to add to your `README.md` file, preferably after the "Quick Start" section:
|
||||||
|
|
||||||
|
## 💻 Development Setup
|
||||||
|
|
||||||
|
To run Spotizerr in development mode:
|
||||||
|
|
||||||
|
1. **Backend (API):**
|
||||||
|
* Ensure Python dependencies are installed (e.g., using `uv pip install -r requirements.txt`).
|
||||||
|
* Start a Redis server.
|
||||||
|
* Run the app inside your activated virtual env: `python3 app.py`
|
||||||
|
2. **Frontend (UI):**
|
||||||
|
* Navigate to `spotizerr-ui/`.
|
||||||
|
* Install dependencies: `pnpm install`.
|
||||||
|
* Start the development server: `pnpm dev`.
|
||||||
|
|
||||||
## 📄 License
|
## 📄 License
|
||||||
|
|
||||||
This project is licensed under the GPL yada yada, see [LICENSE](LICENSE) file for details.
|
This project is licensed under the GPL yada yada, see [LICENSE](LICENSE) file for details.
|
||||||
|
|||||||
70
app.py
70
app.py
@@ -13,6 +13,34 @@ import redis
|
|||||||
import socket
|
import socket
|
||||||
from urllib.parse import urlparse
|
from urllib.parse import urlparse
|
||||||
|
|
||||||
|
# Define a mapping from string log levels to logging constants
|
||||||
|
LOG_LEVELS = {
|
||||||
|
"CRITICAL": logging.CRITICAL,
|
||||||
|
"ERROR": logging.ERROR,
|
||||||
|
"WARNING": logging.WARNING,
|
||||||
|
"INFO": logging.INFO,
|
||||||
|
"DEBUG": logging.DEBUG,
|
||||||
|
"NOTSET": logging.NOTSET,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Run DB migrations as early as possible, before importing any routers that may touch DBs
|
||||||
|
try:
|
||||||
|
from routes.migrations import run_migrations_if_needed
|
||||||
|
|
||||||
|
run_migrations_if_needed()
|
||||||
|
logging.getLogger(__name__).info(
|
||||||
|
"Database migrations executed (if needed) early in startup."
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logging.getLogger(__name__).error(
|
||||||
|
f"Database migration step failed early in startup: {e}", exc_info=True
|
||||||
|
)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Get log level from environment variable, default to WARNING
|
||||||
|
log_level_str = os.getenv("LOG_LEVEL", "WARNING").upper()
|
||||||
|
log_level = LOG_LEVELS.get(log_level_str, logging.INFO)
|
||||||
|
|
||||||
# Apply process umask from environment as early as possible
|
# Apply process umask from environment as early as possible
|
||||||
_umask_value = os.getenv("UMASK")
|
_umask_value = os.getenv("UMASK")
|
||||||
if _umask_value:
|
if _umask_value:
|
||||||
@@ -22,7 +50,32 @@ if _umask_value:
|
|||||||
# Defer logging setup; avoid failing on invalid UMASK
|
# Defer logging setup; avoid failing on invalid UMASK
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
# Import and initialize routes (this will start the watch manager)
|
# Import and initialize routes (this will start the watch manager)
|
||||||
|
from routes.auth.credentials import router as credentials_router
|
||||||
|
from routes.auth.auth import router as auth_router
|
||||||
|
from routes.content.album import router as album_router
|
||||||
|
from routes.content.artist import router as artist_router
|
||||||
|
from routes.content.track import router as track_router
|
||||||
|
from routes.content.playlist import router as playlist_router
|
||||||
|
from routes.content.bulk_add import router as bulk_add_router
|
||||||
|
from routes.core.search import router as search_router
|
||||||
|
from routes.core.history import router as history_router
|
||||||
|
from routes.system.progress import router as prgs_router
|
||||||
|
from routes.system.config import router as config_router
|
||||||
|
|
||||||
|
|
||||||
|
# Import Celery configuration and manager
|
||||||
|
from routes.utils.celery_manager import celery_manager
|
||||||
|
from routes.utils.celery_config import REDIS_URL
|
||||||
|
|
||||||
|
# Import authentication system
|
||||||
|
from routes.auth import AUTH_ENABLED
|
||||||
|
from routes.auth.middleware import AuthMiddleware
|
||||||
|
|
||||||
|
# Import watch manager controls (start/stop) without triggering side effects
|
||||||
|
from routes.utils.watch.manager import start_watch_manager, stop_watch_manager
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Configure application-wide logging
|
# Configure application-wide logging
|
||||||
@@ -48,7 +101,7 @@ def setup_logging():
|
|||||||
|
|
||||||
# Configure root logger
|
# Configure root logger
|
||||||
root_logger = logging.getLogger()
|
root_logger = logging.getLogger()
|
||||||
root_logger.setLevel(logging.DEBUG)
|
root_logger.setLevel(log_level)
|
||||||
|
|
||||||
# Clear any existing handlers from the root logger
|
# Clear any existing handlers from the root logger
|
||||||
if root_logger.hasHandlers():
|
if root_logger.hasHandlers():
|
||||||
@@ -65,12 +118,12 @@ def setup_logging():
|
|||||||
main_log, maxBytes=10 * 1024 * 1024, backupCount=5, encoding="utf-8"
|
main_log, maxBytes=10 * 1024 * 1024, backupCount=5, encoding="utf-8"
|
||||||
)
|
)
|
||||||
file_handler.setFormatter(log_format)
|
file_handler.setFormatter(log_format)
|
||||||
file_handler.setLevel(logging.INFO)
|
file_handler.setLevel(log_level)
|
||||||
|
|
||||||
# Console handler for stderr
|
# Console handler for stderr
|
||||||
console_handler = logging.StreamHandler(sys.stderr)
|
console_handler = logging.StreamHandler(sys.stderr)
|
||||||
console_handler.setFormatter(log_format)
|
console_handler.setFormatter(log_format)
|
||||||
console_handler.setLevel(logging.INFO)
|
console_handler.setLevel(log_level)
|
||||||
|
|
||||||
# Add handlers to root logger
|
# Add handlers to root logger
|
||||||
root_logger.addHandler(file_handler)
|
root_logger.addHandler(file_handler)
|
||||||
@@ -83,10 +136,15 @@ def setup_logging():
|
|||||||
"routes.utils.celery_manager",
|
"routes.utils.celery_manager",
|
||||||
"routes.utils.celery_tasks",
|
"routes.utils.celery_tasks",
|
||||||
"routes.utils.watch",
|
"routes.utils.watch",
|
||||||
|
"uvicorn", # General Uvicorn logger
|
||||||
|
"uvicorn.access", # Uvicorn access logs
|
||||||
|
"uvicorn.error", # Uvicorn error logs
|
||||||
]:
|
]:
|
||||||
logger = logging.getLogger(logger_name)
|
logger = logging.getLogger(logger_name)
|
||||||
logger.setLevel(logging.INFO)
|
logger.setLevel(log_level)
|
||||||
logger.propagate = True # Propagate to root logger
|
# For uvicorn.access, we explicitly set propagate to False to prevent duplicate logging
|
||||||
|
# if access_log=False is used in uvicorn.run, and to ensure our middleware handles it.
|
||||||
|
logger.propagate = False if logger_name == "uvicorn.access" else True
|
||||||
|
|
||||||
logging.info("Logging system initialized")
|
logging.info("Logging system initialized")
|
||||||
|
|
||||||
@@ -386,4 +444,4 @@ if __name__ == "__main__":
|
|||||||
except ValueError:
|
except ValueError:
|
||||||
port = 7171
|
port = 7171
|
||||||
|
|
||||||
uvicorn.run(app, host=host, port=port, log_level="info", access_log=True)
|
uvicorn.run(app, host=host, port=port, log_level=log_level_str.lower(), access_log=False)
|
||||||
|
|||||||
@@ -1,7 +1,3 @@
|
|||||||
import logging
|
import logging
|
||||||
|
|
||||||
# Configure basic logging for the application if not already configured
|
|
||||||
# This remains safe to execute on import
|
|
||||||
logging.basicConfig(level=logging.INFO, format="%(message)s")
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|||||||
@@ -41,13 +41,13 @@ class SSEBroadcaster:
|
|||||||
async def add_client(self, queue: asyncio.Queue):
|
async def add_client(self, queue: asyncio.Queue):
|
||||||
"""Add a new SSE client"""
|
"""Add a new SSE client"""
|
||||||
self.clients.add(queue)
|
self.clients.add(queue)
|
||||||
logger.info(f"SSE: Client connected (total: {len(self.clients)})")
|
logger.debug(f"SSE: Client connected (total: {len(self.clients)})")
|
||||||
|
|
||||||
async def remove_client(self, queue: asyncio.Queue):
|
async def remove_client(self, queue: asyncio.Queue):
|
||||||
"""Remove an SSE client"""
|
"""Remove an SSE client"""
|
||||||
self.clients.discard(queue)
|
self.clients.discard(queue)
|
||||||
logger.info(f"SSE: Client disconnected (total: {len(self.clients)})")
|
logger.debug(f"SSE: Client disconnected (total: {len(self.clients)})")
|
||||||
|
|
||||||
async def broadcast_event(self, event_data: dict):
|
async def broadcast_event(self, event_data: dict):
|
||||||
"""Broadcast an event to all connected clients"""
|
"""Broadcast an event to all connected clients"""
|
||||||
logger.debug(
|
logger.debug(
|
||||||
@@ -82,7 +82,6 @@ class SSEBroadcaster:
|
|||||||
# Clean up disconnected clients
|
# Clean up disconnected clients
|
||||||
for client in disconnected:
|
for client in disconnected:
|
||||||
self.clients.discard(client)
|
self.clients.discard(client)
|
||||||
|
|
||||||
logger.info(
|
logger.info(
|
||||||
f"SSE Broadcaster: Successfully sent to {sent_count} clients, removed {len(disconnected)} disconnected clients"
|
f"SSE Broadcaster: Successfully sent to {sent_count} clients, removed {len(disconnected)} disconnected clients"
|
||||||
)
|
)
|
||||||
@@ -178,7 +177,7 @@ def start_sse_redis_subscriber():
|
|||||||
# Start Redis subscriber in background thread
|
# Start Redis subscriber in background thread
|
||||||
thread = threading.Thread(target=redis_subscriber_thread, daemon=True)
|
thread = threading.Thread(target=redis_subscriber_thread, daemon=True)
|
||||||
thread.start()
|
thread.start()
|
||||||
logger.info("SSE Redis Subscriber: Background thread started")
|
logger.debug("SSE Redis Subscriber: Background thread started")
|
||||||
|
|
||||||
|
|
||||||
async def transform_callback_to_task_format(task_id: str, event_data: dict) -> dict:
|
async def transform_callback_to_task_format(task_id: str, event_data: dict) -> dict:
|
||||||
@@ -496,7 +495,7 @@ def _build_task_response(
|
|||||||
try:
|
try:
|
||||||
item_id = item_url.split("/")[-1]
|
item_id = item_url.split("/")[-1]
|
||||||
if item_id:
|
if item_id:
|
||||||
base_url = str(request.base_url).rstrip("/")
|
base_url = str(request.base_url).rstrip("/") if request else "http://localhost:7171"
|
||||||
dynamic_original_url = (
|
dynamic_original_url = (
|
||||||
f"{base_url}/api/{download_type}/download/{item_id}"
|
f"{base_url}/api/{download_type}/download/{item_id}"
|
||||||
)
|
)
|
||||||
@@ -574,9 +573,7 @@ def _build_task_response(
|
|||||||
return task_response
|
return task_response
|
||||||
|
|
||||||
|
|
||||||
async def get_paginated_tasks(
|
async def get_paginated_tasks(page=1, limit=20, active_only=False, request: Optional[Request] = None):
|
||||||
page=1, limit=20, active_only=False, request: Request = None
|
|
||||||
):
|
|
||||||
"""
|
"""
|
||||||
Get paginated list of tasks.
|
Get paginated list of tasks.
|
||||||
"""
|
"""
|
||||||
@@ -1069,12 +1066,10 @@ async def stream_task_updates(
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
# Register this client with the broadcaster
|
# Register this client with the broadcaster
|
||||||
logger.info("SSE Stream: New client connecting...")
|
logger.debug(f"SSE Stream: New client connecting...")
|
||||||
await sse_broadcaster.add_client(client_queue)
|
await sse_broadcaster.add_client(client_queue)
|
||||||
logger.info(
|
logger.debug(f"SSE Stream: Client registered successfully, total clients: {len(sse_broadcaster.clients)}")
|
||||||
f"SSE Stream: Client registered successfully, total clients: {len(sse_broadcaster.clients)}"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Send initial data immediately upon connection
|
# Send initial data immediately upon connection
|
||||||
initial_data = await generate_task_update_event(
|
initial_data = await generate_task_update_event(
|
||||||
time.time(), active_only, request
|
time.time(), active_only, request
|
||||||
@@ -1110,10 +1105,8 @@ async def stream_task_updates(
|
|||||||
}
|
}
|
||||||
event_json = json.dumps(callback_event)
|
event_json = json.dumps(callback_event)
|
||||||
yield f"data: {event_json}\n\n"
|
yield f"data: {event_json}\n\n"
|
||||||
logger.info(
|
logger.debug(f"SSE Stream: Sent replay callback for task {task_id}")
|
||||||
f"SSE Stream: Sent replay callback for task {task_id}"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Send periodic heartbeats and listen for real-time events
|
# Send periodic heartbeats and listen for real-time events
|
||||||
last_heartbeat = time.time()
|
last_heartbeat = time.time()
|
||||||
heartbeat_interval = 30.0
|
heartbeat_interval = 30.0
|
||||||
@@ -1200,7 +1193,7 @@ async def stream_task_updates(
|
|||||||
await asyncio.sleep(1)
|
await asyncio.sleep(1)
|
||||||
|
|
||||||
except asyncio.CancelledError:
|
except asyncio.CancelledError:
|
||||||
logger.info("SSE client disconnected")
|
logger.debug("SSE client disconnected")
|
||||||
return
|
return
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"SSE connection error: {e}", exc_info=True)
|
logger.error(f"SSE connection error: {e}", exc_info=True)
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ import subprocess
|
|||||||
import logging
|
import logging
|
||||||
import time
|
import time
|
||||||
import threading
|
import threading
|
||||||
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
# Import Celery task utilities
|
# Import Celery task utilities
|
||||||
@@ -41,8 +42,10 @@ class CeleryManager:
|
|||||||
)
|
)
|
||||||
|
|
||||||
def _get_worker_command(
|
def _get_worker_command(
|
||||||
self, queues, concurrency, worker_name_suffix, log_level="INFO"
|
self, queues, concurrency, worker_name_suffix, log_level_env=None
|
||||||
):
|
):
|
||||||
|
# Use LOG_LEVEL from environment if provided, otherwise default to INFO
|
||||||
|
log_level = log_level_env if log_level_env else os.getenv("LOG_LEVEL", "WARNING").upper()
|
||||||
# Use a unique worker name to avoid conflicts.
|
# Use a unique worker name to avoid conflicts.
|
||||||
# %h is replaced by celery with the actual hostname.
|
# %h is replaced by celery with the actual hostname.
|
||||||
hostname = f"worker_{worker_name_suffix}@%h"
|
hostname = f"worker_{worker_name_suffix}@%h"
|
||||||
@@ -123,6 +126,7 @@ class CeleryManager:
|
|||||||
queues="downloads",
|
queues="downloads",
|
||||||
concurrency=self.concurrency,
|
concurrency=self.concurrency,
|
||||||
worker_name_suffix="dlw", # Download Worker
|
worker_name_suffix="dlw", # Download Worker
|
||||||
|
log_level_env=os.getenv("LOG_LEVEL", "WARNING").upper(),
|
||||||
)
|
)
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Starting Celery Download Worker with command: {' '.join(download_cmd)}"
|
f"Starting Celery Download Worker with command: {' '.join(download_cmd)}"
|
||||||
@@ -157,7 +161,8 @@ class CeleryManager:
|
|||||||
queues="utility_tasks,default", # Listen to utility and default
|
queues="utility_tasks,default", # Listen to utility and default
|
||||||
concurrency=5, # Increased concurrency for SSE updates and utility tasks
|
concurrency=5, # Increased concurrency for SSE updates and utility tasks
|
||||||
worker_name_suffix="utw", # Utility Worker
|
worker_name_suffix="utw", # Utility Worker
|
||||||
log_level="ERROR", # Reduce log verbosity for utility worker (only errors)
|
log_level_env=os.getenv("LOG_LEVEL", "ERROR").upper(),
|
||||||
|
|
||||||
)
|
)
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Starting Celery Utility Worker with command: {' '.join(utility_cmd)}"
|
f"Starting Celery Utility Worker with command: {' '.join(utility_cmd)}"
|
||||||
@@ -256,7 +261,7 @@ class CeleryManager:
|
|||||||
|
|
||||||
# Restart only the download worker
|
# Restart only the download worker
|
||||||
download_cmd = self._get_worker_command(
|
download_cmd = self._get_worker_command(
|
||||||
"downloads", self.concurrency, "dlw"
|
"downloads", self.concurrency, "dlw", log_level_env=os.getenv("LOG_LEVEL", "WARNING").upper()
|
||||||
)
|
)
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Restarting Celery Download Worker with command: {' '.join(download_cmd)}"
|
f"Restarting Celery Download Worker with command: {' '.join(download_cmd)}"
|
||||||
@@ -372,10 +377,7 @@ celery_manager = CeleryManager()
|
|||||||
|
|
||||||
# Example of how to use the manager (typically called from your main app script)
|
# Example of how to use the manager (typically called from your main app script)
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
logging.basicConfig(
|
# Removed logging.basicConfig as it's handled by the main app's setup_logging
|
||||||
level=logging.INFO,
|
|
||||||
format="%(message)s",
|
|
||||||
)
|
|
||||||
logger.info("Starting Celery Manager example...")
|
logger.info("Starting Celery Manager example...")
|
||||||
celery_manager.start()
|
celery_manager.start()
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -246,7 +246,7 @@ class CeleryDownloadQueueManager:
|
|||||||
"""Initialize the Celery-based download queue manager"""
|
"""Initialize the Celery-based download queue manager"""
|
||||||
self.max_concurrent = MAX_CONCURRENT_DL
|
self.max_concurrent = MAX_CONCURRENT_DL
|
||||||
self.paused = False
|
self.paused = False
|
||||||
print(
|
logger.info(
|
||||||
f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}"
|
f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user