fix: global logging level

Phlogi
2025-08-23 19:37:42 +02:00
parent 7025b273b3
commit 13680ddd26
5 changed files with 48 additions and 37 deletions

app.py (34 changed lines)
View File

@@ -13,6 +13,16 @@ import redis
import socket
from urllib.parse import urlparse

+# Define a mapping from string log levels to logging constants
+LOG_LEVELS = {
+    "CRITICAL": logging.CRITICAL,
+    "ERROR": logging.ERROR,
+    "WARNING": logging.WARNING,
+    "INFO": logging.INFO,
+    "DEBUG": logging.DEBUG,
+    "NOTSET": logging.NOTSET,
+}

# Run DB migrations as early as possible, before importing any routers that may touch DBs
try:
    from routes.migrations import run_migrations_if_needed
@@ -27,13 +37,18 @@ except Exception as e:
    )
    sys.exit(1)

+# Get log level from environment variable, default to INFO
+log_level_str = os.getenv("LOG_LEVEL", "WARNING").upper()
+log_level = LOG_LEVELS.get(log_level_str, logging.INFO)

# Import route routers (to be created)
from routes.auth.credentials import router as credentials_router
from routes.auth.auth import router as auth_router
-from routes.content.artist import router as artist_router
from routes.content.album import router as album_router
+from routes.content.artist import router as artist_router
from routes.content.track import router as track_router
from routes.content.playlist import router as playlist_router
+from routes.content.bulk_add import router as bulk_add_router
from routes.core.search import router as search_router
from routes.core.history import router as history_router
from routes.system.progress import router as prgs_router
@@ -66,7 +81,7 @@ def setup_logging():
# Configure root logger
    root_logger = logging.getLogger()
-    root_logger.setLevel(logging.DEBUG)
+    root_logger.setLevel(log_level)

    # Clear any existing handlers from the root logger
    if root_logger.hasHandlers():
@@ -83,12 +98,12 @@ def setup_logging():
        main_log, maxBytes=10 * 1024 * 1024, backupCount=5, encoding="utf-8"
    )
    file_handler.setFormatter(log_format)
-    file_handler.setLevel(logging.INFO)
+    file_handler.setLevel(log_level)

    # Console handler for stderr
    console_handler = logging.StreamHandler(sys.stderr)
    console_handler.setFormatter(log_format)
-    console_handler.setLevel(logging.INFO)
+    console_handler.setLevel(log_level)

    # Add handlers to root logger
    root_logger.addHandler(file_handler)
@@ -101,10 +116,15 @@ def setup_logging():
"routes.utils.celery_manager", "routes.utils.celery_manager",
"routes.utils.celery_tasks", "routes.utils.celery_tasks",
"routes.utils.watch", "routes.utils.watch",
"uvicorn", # General Uvicorn logger
"uvicorn.access", # Uvicorn access logs
"uvicorn.error", # Uvicorn error logs
]: ]:
logger = logging.getLogger(logger_name) logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO) logger.setLevel(log_level)
logger.propagate = True # Propagate to root logger # For uvicorn.access, we explicitly set propagate to False to prevent duplicate logging
# if access_log=False is used in uvicorn.run, and to ensure our middleware handles it.
logger.propagate = False if logger_name == "uvicorn.access" else True
logging.info("Logging system initialized") logging.info("Logging system initialized")
@@ -363,4 +383,4 @@ if __name__ == "__main__":
    except ValueError:
        port = 7171

-    uvicorn.run(app, host=host, port=port, log_level="info", access_log=True)
+    uvicorn.run(app, host=host, port=port, log_level=log_level_str.lower(), access_log=False)
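For reference, a minimal sketch (assuming a trivial FastAPI app, not the project's actual entrypoint) of driving Uvicorn from the same environment variable; uvicorn.run accepts a lowercase level string, and access_log=False suppresses Uvicorn's own access-log lines:

    import os

    import uvicorn
    from fastapi import FastAPI

    app = FastAPI()
    log_level_str = os.getenv("LOG_LEVEL", "WARNING").upper()

    if __name__ == "__main__":
        # Uvicorn expects a lowercase level name ("warning", "info", "debug", ...)
        uvicorn.run(app, host="0.0.0.0", port=7171, log_level=log_level_str.lower(), access_log=False)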

View File

@@ -1,7 +1,3 @@
import logging

-# Configure basic logging for the application if not already configured
-# This remains safe to execute on import
-logging.basicConfig(level=logging.INFO, format="%(message)s")

logger = logging.getLogger(__name__)

View File

@@ -31,12 +31,12 @@ class SSEBroadcaster:
    async def add_client(self, queue: asyncio.Queue):
        """Add a new SSE client"""
        self.clients.add(queue)
-        logger.info(f"SSE: Client connected (total: {len(self.clients)})")
+        logger.debug(f"SSE: Client connected (total: {len(self.clients)})")

    async def remove_client(self, queue: asyncio.Queue):
        """Remove an SSE client"""
        self.clients.discard(queue)
-        logger.info(f"SSE: Client disconnected (total: {len(self.clients)})")
+        logger.debug(f"SSE: Client disconnected (total: {len(self.clients)})")

    async def broadcast_event(self, event_data: dict):
        """Broadcast an event to all connected clients"""
@@ -69,7 +69,7 @@ class SSEBroadcaster:
        for client in disconnected:
            self.clients.discard(client)

-        logger.info(f"SSE Broadcaster: Successfully sent to {sent_count} clients, removed {len(disconnected)} disconnected clients")
+        logger.debug(f"SSE Broadcaster: Successfully sent to {sent_count} clients, removed {len(disconnected)} disconnected clients")

# Global broadcaster instance
sse_broadcaster = SSEBroadcaster()
@@ -139,7 +139,7 @@ def start_sse_redis_subscriber():
    # Start Redis subscriber in background thread
    thread = threading.Thread(target=redis_subscriber_thread, daemon=True)
    thread.start()
-    logger.info("SSE Redis Subscriber: Background thread started")
+    logger.debug("SSE Redis Subscriber: Background thread started")

async def transform_callback_to_task_format(task_id: str, event_data: dict) -> Optional[dict]:
    """Transform callback event data into the task format expected by frontend"""
@@ -200,13 +200,7 @@ async def trigger_sse_update(task_id: str, reason: str = "task_update"):
    last_status = get_last_task_status(task_id)

-    # Create a dummy request for the _build_task_response function
-    from fastapi import Request
-    class DummyRequest:
-        def __init__(self):
-            self.base_url = "http://localhost:7171"
-    dummy_request = DummyRequest()
-    task_response = _build_task_response(task_info, last_status, task_id, current_time, dummy_request)
+    task_response = _build_task_response(task_info, last_status, task_id, current_time, request=None)

    # Create minimal event data - global counts will be added at broadcast time
    event_data = {
@@ -431,7 +425,7 @@ def _build_error_callback_object(last_status):
    return callback_object


-def _build_task_response(task_info, last_status, task_id, current_time, request: Request):
+def _build_task_response(task_info, last_status, task_id, current_time, request: Optional[Request] = None):
    """
    Helper function to build a standardized task response object.
    """
@@ -444,7 +438,7 @@ def _build_task_response(task_info, last_status, task_id, current_time, request:
    try:
        item_id = item_url.split("/")[-1]
        if item_id:
-            base_url = str(request.base_url).rstrip("/")
+            base_url = str(request.base_url).rstrip("/") if request else "http://localhost:7171"
            dynamic_original_url = (
                f"{base_url}/api/{download_type}/download/{item_id}"
            )
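The request parameter is now optional; a minimal sketch of the fallback pattern used above (names are illustrative, not from the repository):

    from typing import Optional

    from fastapi import Request

    DEFAULT_BASE_URL = "http://localhost:7171"  # fallback for callers with no HTTP request (e.g. SSE broadcasts)

    def resolve_base_url(request: Optional[Request] = None) -> str:
        # Prefer the live request's base URL; otherwise fall back to the default.
        return str(request.base_url).rstrip("/") if request else DEFAULT_BASE_URL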
@@ -496,7 +490,7 @@ def _build_task_response(task_info, last_status, task_id, current_time, request:
    return task_response


-async def get_paginated_tasks(page=1, limit=20, active_only=False, request: Request = None):
+async def get_paginated_tasks(page=1, limit=20, active_only=False, request: Optional[Request] = None):
    """
    Get paginated list of tasks.
    """
@@ -938,9 +932,9 @@ async def stream_task_updates(request: Request, current_user: User = Depends(get
    try:
        # Register this client with the broadcaster
-        logger.info(f"SSE Stream: New client connecting...")
+        logger.debug(f"SSE Stream: New client connecting...")
        await sse_broadcaster.add_client(client_queue)
-        logger.info(f"SSE Stream: Client registered successfully, total clients: {len(sse_broadcaster.clients)}")
+        logger.debug(f"SSE Stream: Client registered successfully, total clients: {len(sse_broadcaster.clients)}")

        # Send initial data immediately upon connection
        initial_data = await generate_task_update_event(time.time(), active_only, request)
@@ -973,7 +967,7 @@ async def stream_task_updates(request: Request, current_user: User = Depends(get
                }
                event_json = json.dumps(callback_event)
                yield f"data: {event_json}\n\n"
-                logger.info(f"SSE Stream: Sent replay callback for task {task_id}")
+                logger.debug(f"SSE Stream: Sent replay callback for task {task_id}")

        # Send periodic heartbeats and listen for real-time events
        last_heartbeat = time.time()
@@ -1039,7 +1033,7 @@ async def stream_task_updates(request: Request, current_user: User = Depends(get
            await asyncio.sleep(1)

    except asyncio.CancelledError:
-        logger.info("SSE client disconnected")
+        logger.debug("SSE client disconnected")
        return
    except Exception as e:
        logger.error(f"SSE connection error: {e}", exc_info=True)

View File

@@ -2,6 +2,7 @@ import subprocess
import logging
import time
import threading
+import os

# Import Celery task utilities
from .celery_config import get_config_params, MAX_CONCURRENT_DL
@@ -40,8 +41,10 @@ class CeleryManager:
        )

    def _get_worker_command(
-        self, queues, concurrency, worker_name_suffix, log_level="INFO"
+        self, queues, concurrency, worker_name_suffix, log_level_env=None
    ):
+        # Use LOG_LEVEL from environment if provided, otherwise default to INFO
+        log_level = log_level_env if log_level_env else os.getenv("LOG_LEVEL", "WARNING").upper()
        # Use a unique worker name to avoid conflicts.
        # %h is replaced by celery with the actual hostname.
        hostname = f"worker_{worker_name_suffix}@%h"
@@ -117,6 +120,7 @@ class CeleryManager:
queues="downloads", queues="downloads",
concurrency=self.concurrency, concurrency=self.concurrency,
worker_name_suffix="dlw", # Download Worker worker_name_suffix="dlw", # Download Worker
log_level_env=os.getenv("LOG_LEVEL", "WARNING").upper(),
) )
logger.info( logger.info(
f"Starting Celery Download Worker with command: {' '.join(download_cmd)}" f"Starting Celery Download Worker with command: {' '.join(download_cmd)}"
@@ -151,7 +155,7 @@ class CeleryManager:
queues="utility_tasks,default", # Listen to utility and default queues="utility_tasks,default", # Listen to utility and default
concurrency=5, # Increased concurrency for SSE updates and utility tasks concurrency=5, # Increased concurrency for SSE updates and utility tasks
worker_name_suffix="utw", # Utility Worker worker_name_suffix="utw", # Utility Worker
log_level="ERROR" # Reduce log verbosity for utility worker (only errors) log_level_env=os.getenv("LOG_LEVEL", "WARNING").upper(),
) )
logger.info( logger.info(
f"Starting Celery Utility Worker with command: {' '.join(utility_cmd)}" f"Starting Celery Utility Worker with command: {' '.join(utility_cmd)}"
@@ -250,7 +254,7 @@ class CeleryManager:
        # Restart only the download worker
        download_cmd = self._get_worker_command(
-            "downloads", self.concurrency, "dlw"
+            "downloads", self.concurrency, "dlw", log_level_env=os.getenv("LOG_LEVEL", "WARNING").upper()
        )
        logger.info(
            f"Restarting Celery Download Worker with command: {' '.join(download_cmd)}"
@@ -366,10 +370,7 @@ celery_manager = CeleryManager()
# Example of how to use the manager (typically called from your main app script)
if __name__ == "__main__":
-    logging.basicConfig(
-        level=logging.INFO,
-        format="%(message)s",
-    )
+    # Removed logging.basicConfig as it's handled by the main app's setup_logging
    logger.info("Starting Celery Manager example...")
    celery_manager.start()
    try:

View File

@@ -246,7 +246,7 @@ class CeleryDownloadQueueManager:
"""Initialize the Celery-based download queue manager""" """Initialize the Celery-based download queue manager"""
self.max_concurrent = MAX_CONCURRENT_DL self.max_concurrent = MAX_CONCURRENT_DL
self.paused = False self.paused = False
print( logger.info(
f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}" f"Celery Download Queue Manager initialized with max_concurrent={self.max_concurrent}"
) )