From 40a6d036be259fd7b05b15743ecb36d77879eb13 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 00:22:06 -0400 Subject: [PATCH 01/34] feat: implement core bot infrastructure and monitoring system - Add comprehensive task management system - Implement distributed tracing and monitoring - Refactor bot core with new sentry integration - Update database controllers with tracing support - Rename emoji.py to emoji_manager.py --- tux/app.py | 184 +++--- tux/bot.py | 803 ++++++++++++----------- tux/cog_loader.py | 624 ++++++++++-------- tux/database/controllers/__init__.py | 126 ++-- tux/database/controllers/base.py | 41 +- tux/database/controllers/levels.py | 22 + tux/utils/banner.py | 5 +- tux/utils/context_utils.py | 108 +++ tux/utils/{emoji.py => emoji_manager.py} | 0 tux/utils/hot_reload.py | 2 +- tux/utils/protocols.py | 32 + tux/utils/sentry_manager.py | 560 ++++++++++++++++ tux/utils/task_manager.py | 685 +++++++++++++++++++ tux/utils/tracing.py | 518 +++++++++++++++ 14 files changed, 2887 insertions(+), 823 deletions(-) create mode 100644 tux/utils/context_utils.py rename tux/utils/{emoji.py => emoji_manager.py} (100%) create mode 100644 tux/utils/protocols.py create mode 100644 tux/utils/sentry_manager.py create mode 100644 tux/utils/task_manager.py create mode 100644 tux/utils/tracing.py diff --git a/tux/app.py b/tux/app.py index 91eb4b4a0..5059ab790 100644 --- a/tux/app.py +++ b/tux/app.py @@ -1,130 +1,91 @@ -"""TuxApp: Orchestration and lifecycle management for the Tux Discord bot.""" +""" +TuxApp: Main application entrypoint and lifecycle orchestrator. + +This module contains the `TuxApp` class, which serves as the primary entrypoint +for the Tux Discord bot. It is responsible for: + +- **Environment Setup**: Validating configuration, initializing Sentry, and setting + up OS-level signal handlers for graceful shutdown. +- **Bot Instantiation**: Creating the instance of the `Tux` bot class with the + appropriate intents, command prefix logic, and owner IDs. +- **Lifecycle Management**: Starting the asyncio event loop and managing the + bot's main `start` and `shutdown` sequence, including handling `KeyboardInterrupt`. +""" import asyncio import signal -from types import FrameType import discord -import sentry_sdk from loguru import logger from tux.bot import Tux from tux.help import TuxHelp from tux.utils.config import CONFIG -from tux.utils.env import get_current_env - - -async def get_prefix(bot: Tux, message: discord.Message) -> list[str]: - """Resolve the command prefix for a guild or use the default prefix.""" - prefix: str | None = None - if message.guild: - try: - from tux.database.controllers import DatabaseController # noqa: PLC0415 - - prefix = await DatabaseController().guild_config.get_guild_prefix(message.guild.id) - except Exception as e: - logger.error(f"Error getting guild prefix: {e}") - return [prefix or CONFIG.DEFAULT_PREFIX] +from tux.utils.sentry_manager import SentryManager class TuxApp: - """Orchestrates the startup, shutdown, and environment for the Tux bot.""" - - def __init__(self): - """Initialize the TuxApp with no bot instance yet.""" - self.bot = None - - def run(self) -> None: - """Run the Tux bot application (entrypoint for CLI).""" - asyncio.run(self.start()) + """ + Orchestrates the startup, shutdown, and environment for the Tux bot. 
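+
+    Typical usage from a synchronous entrypoint (a sketch; the real CLI
+    wiring may differ)::
+
+        TuxApp().run()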
- def setup_sentry(self) -> None: - """Initialize Sentry for error monitoring and tracing.""" - if not CONFIG.SENTRY_DSN: - logger.warning("No Sentry DSN configured, skipping Sentry setup") - return - - logger.info("Setting up Sentry...") - - try: - sentry_sdk.init( - dsn=CONFIG.SENTRY_DSN, - release=CONFIG.BOT_VERSION, - environment=get_current_env(), - enable_tracing=True, - attach_stacktrace=True, - send_default_pii=False, - traces_sample_rate=1.0, - profiles_sample_rate=1.0, - _experiments={ - "enable_logs": True, # https://docs.sentry.io/platforms/python/logs/ - }, - ) - - # Add additional global tags - sentry_sdk.set_tag("discord_library_version", discord.__version__) + This class is not a `discord.py` cog, but rather a top-level application + runner that manages the bot's entire lifecycle from an OS perspective. + """ - logger.info(f"Sentry initialized: {sentry_sdk.is_initialized()}") + # --- Initialization --- - except Exception as e: - logger.error(f"Failed to initialize Sentry: {e}") - - def setup_signals(self) -> None: - """Set up signal handlers for graceful shutdown.""" - signal.signal(signal.SIGTERM, self.handle_sigterm) - signal.signal(signal.SIGINT, self.handle_sigterm) - - def handle_sigterm(self, signum: int, frame: FrameType | None) -> None: - """Handle SIGTERM/SIGINT by raising KeyboardInterrupt for graceful shutdown.""" - logger.info(f"Received signal {signum}") + def __init__(self): + """Initializes the TuxApp, setting the bot instance to None initially.""" + self.bot: Tux | None = None - if sentry_sdk.is_initialized(): - with sentry_sdk.push_scope() as scope: - scope.set_tag("signal.number", signum) - scope.set_tag("lifecycle.event", "termination_signal") + # --- Application Lifecycle --- - sentry_sdk.add_breadcrumb( - category="lifecycle", - message=f"Received termination signal {signum}", - level="info", - ) + def run(self) -> None: + """ + The main synchronous entrypoint for the application. - raise KeyboardInterrupt + This method starts the asyncio event loop and runs the primary `start` + coroutine, effectively launching the bot. + """ + asyncio.run(self.start()) - def validate_config(self) -> bool: - """Validate that all required configuration is present.""" - if not CONFIG.BOT_TOKEN: - logger.critical("No bot token provided. Set DEV_BOT_TOKEN or PROD_BOT_TOKEN in your .env file.") - return False + async def start(self) -> None: + """ + The main asynchronous entrypoint for the application. - return True + This method orchestrates the entire bot startup sequence: setting up + Sentry and signal handlers, validating config, creating the `Tux` + instance, and connecting to Discord. It includes a robust + try/except/finally block to ensure graceful shutdown. + """ - async def start(self) -> None: - """Start the Tux bot, handling setup, errors, and shutdown.""" - self.setup_sentry() + # Initialize Sentry + SentryManager.setup() + # Set up signal handlers self.setup_signals() + # Validate config if not self.validate_config(): return + # Configure owner IDs, dynamically adding sysadmins if configured. + # This allows specified users to have access to sensitive commands like `eval`. 
owner_ids = {CONFIG.BOT_OWNER_ID} - if CONFIG.ALLOW_SYSADMINS_EVAL: logger.warning( - "⚠️ Eval is enabled for sysadmins, this is potentially dangerous; see settings.yml.example for more info.", + "⚠️ Eval is enabled for sysadmins, this is potentially dangerous; " + "see settings.yml.example for more info.", ) owner_ids.update(CONFIG.SYSADMIN_IDS) - else: logger.warning("🔒️ Eval is disabled for sysadmins; see settings.yml.example for more info.") + # Instantiate the main bot class with all necessary parameters. self.bot = Tux( - command_prefix=get_prefix, strip_after_prefix=True, case_insensitive=True, intents=discord.Intents.all(), - # owner_ids={CONFIG.BOT_OWNER_ID, *CONFIG.SYSADMIN_IDS}, owner_ids=owner_ids, allowed_mentions=discord.AllowedMentions(everyone=False), help_command=TuxHelp(), @@ -132,25 +93,60 @@ async def start(self) -> None: status=discord.Status.online, ) + # Start the bot try: + # This is the main blocking call that connects to Discord and runs the bot. await self.bot.start(CONFIG.BOT_TOKEN, reconnect=True) except KeyboardInterrupt: + # This is caught when the user presses Ctrl+C. logger.info("Shutdown requested (KeyboardInterrupt)") except Exception as e: - logger.critical(f"Bot failed to start: {e}") - await self.shutdown() - + # Catch any other unexpected exception during bot runtime. + logger.critical(f"Bot failed to start or run: {e}") finally: + # Ensure that shutdown is always called to clean up resources. await self.shutdown() async def shutdown(self) -> None: - """Gracefully shut down the bot and flush Sentry.""" + """ + Gracefully shuts down the bot and its resources. + + This involves calling the bot's internal shutdown sequence and then + flushing any remaining Sentry events to ensure all data is sent. + """ if self.bot and not self.bot.is_closed(): await self.bot.shutdown() - if sentry_sdk.is_initialized(): - sentry_sdk.flush() - await asyncio.sleep(0.1) + SentryManager.flush() + await asyncio.sleep(0.1) # Brief pause to allow buffers to flush logger.info("Shutdown complete") + + # --- Environment Setup --- + + def setup_signals(self) -> None: + """ + Sets up OS-level signal handlers for graceful shutdown. + + This ensures that when the bot process receives a SIGINT (Ctrl+C) or + SIGTERM (from systemd or Docker), it is intercepted and handled + cleanly instead of causing an abrupt exit. + """ + signal.signal(signal.SIGTERM, SentryManager.report_signal) + signal.signal(signal.SIGINT, SentryManager.report_signal) + + def validate_config(self) -> bool: + """ + Performs a pre-flight check for essential configuration. + + Returns + ------- + bool + True if the configuration is valid, False otherwise. + """ + if not CONFIG.BOT_TOKEN: + logger.critical("No bot token provided. Set DEV_BOT_TOKEN or PROD_BOT_TOKEN in your .env file.") + return False + + return True diff --git a/tux/bot.py b/tux/bot.py index 2e2f49491..3b1da6238 100644 --- a/tux/bot.py +++ b/tux/bot.py @@ -1,7 +1,19 @@ -"""Tux Discord bot core implementation. - -Defines the Tux bot class, which extends discord.py's Bot and manages -setup, cog loading, error handling, and resource cleanup. +""" +Tux Discord bot core implementation. + +This module defines the primary `Tux` class, which serves as the central orchestrator +for the entire bot application. It extends `discord.py`'s `commands.Bot` and is +responsible for the following key areas: + +- **Lifecycle Management**: Handles the startup and shutdown sequences, ensuring + that all sub-systems are initialized and terminated gracefully. 
+- **Component Orchestration**: Initializes and holds instances of various manager + classes (e.g., `TaskManager`, `SentryManager`, `EmojiManager`) that encapsulate + specific functionalities. +- **Cog Loading**: Triggers the loading of all command cogs from the `tux/cogs` + directory via the `CogLoader`. +- **Event Handling**: Implements core `discord.py` event listeners like `on_ready` + and `setup_hook` to manage the bot's state as it interacts with Discord. """ from __future__ import annotations @@ -9,29 +21,51 @@ import asyncio import contextlib from collections.abc import Callable, Coroutine +from dataclasses import dataclass +from enum import Enum, auto from typing import Any import discord -import sentry_sdk from discord.ext import commands, tasks from loguru import logger from rich.console import Console -from tux.cog_loader import CogLoader from tux.database.client import db +from tux.database.controllers import DatabaseController from tux.utils.banner import create_banner -from tux.utils.config import Config -from tux.utils.emoji import EmojiManager +from tux.utils.config import CONFIG, Config +from tux.utils.emoji_manager import EmojiManager from tux.utils.env import is_dev_mode -from tux.utils.sentry import start_span, start_transaction - -# Create console for rich output -console = Console(stderr=True, force_terminal=True) +from tux.utils.sentry_manager import SentryManager +from tux.utils.task_manager import TaskManager +from tux.utils.tracing import instrument_bot_commands, start_span, start_transaction # Type hint for discord.ext.tasks.Loop type TaskLoop = tasks.Loop[Callable[[], Coroutine[Any, Any, None]]] +class TaskCategory(Enum): + """Categories for background tasks.""" + + SCHEDULED = auto() + GATEWAY = auto() + SYSTEM = auto() + COMMAND = auto() + UNKNOWN = auto() + + +@dataclass +class BotState: + """Manages the bot's operational state flags.""" + + is_shutting_down: bool = False + setup_complete: bool = False + start_time: float | None = None + emoji_manager_initialized: bool = False + hot_reload_loaded: bool = False + banner_logged: bool = False + + class DatabaseConnectionError(RuntimeError): """Raised when database connection fails.""" @@ -40,158 +74,154 @@ class DatabaseConnectionError(RuntimeError): class Tux(commands.Bot): """ - Main bot class for Tux, extending discord.py's Bot. + The main class for the Tux Discord bot. - Handles setup, cog loading, error handling, Sentry tracing, and resource cleanup. + This class extends `discord.py`'s `commands.Bot` and serves as the central + orchestrator for the application. It is responsible for initializing + sub-systems (like database, Sentry, emoji management), loading cogs, + handling the bot's lifecycle (setup, shutdown), and processing events. """ + # --- Initialization --- + def __init__(self, *args: Any, **kwargs: Any) -> None: - """Initialize the Tux bot and start setup process.""" - super().__init__(*args, **kwargs) - # Core state - self.is_shutting_down: bool = False - self.setup_complete: bool = False - self.start_time: float | None = None + """ + Initializes the Tux bot, its managers, and lifecycle steps. + + This sets up the core state, managers (Sentry, Emoji, Task), and defines + the sequence of operations for the bot's startup and shutdown routines. + It also creates and schedules the main setup task. + """ + super().__init__(*args, **kwargs, command_prefix=self._get_prefix) + + # Core bot state flags, managed by the BotState dataclass. 
+ self.state = BotState() + self.prefix_cache: dict[int, str] = {} self.setup_task: asyncio.Task[None] | None = None - self.active_sentry_transactions: dict[int, Any] = {} - - self._emoji_manager_initialized = False - self._hot_reload_loaded = False - self._banner_logged = False - self._startup_task = None + self._startup_task: asyncio.Task[None] | None = None + # Sub-systems and managers that encapsulate specific functionalities. + self.sentry_manager = SentryManager() self.emoji_manager = EmojiManager(self) + self.task_manager = TaskManager(self) self.console = Console(stderr=True, force_terminal=True) + # Bot lifecycle routines are defined as lists of (name, function) tuples. + # This makes the setup and shutdown sequences clear and easy to modify. + self.setup_steps = [ + ("database", self._setup_database), + ("jishaku", self._load_jishaku), + ("cogs", self._load_cogs), + ("hot_reload", self._setup_hot_reload), + ("monitoring", self.task_manager.start), + ("instrument_tasks", self.task_manager.setup_task_instrumentation), + ("instrument_commands", lambda: instrument_bot_commands(self)), + ] + + self.shutdown_steps = [ + ("handle_setup_task", self._handle_setup_task), + ("cleanup_tasks", self.task_manager.cancel_all_tasks), + ("close_connections", self._close_connections), + ] + + # The main setup routine is started as a background task immediately. logger.debug("Creating bot setup task") self.setup_task = asyncio.create_task(self.setup(), name="bot_setup") self.setup_task.add_done_callback(self._setup_callback) + # --- Setup & Lifecycle --- + async def setup(self) -> None: - """Set up the bot: connect to database, load extensions, and start monitoring.""" + """ + Executes the bot's startup routine in a defined sequence. + + This method iterates through the `setup_steps` list, awaiting each + asynchronous setup method to ensure the bot is properly initialized + before it goes online. If any step fails, the setup is aborted, and + a graceful shutdown is triggered. + + Raises + ------ + Exception + Propagates any exception that occurs during a setup step, + which is then caught, logged, and triggers a shutdown. + """ try: with start_span("bot.setup", "Bot setup process") as span: - span.set_tag("setup_phase", "starting") - await self._setup_database() - span.set_tag("setup_phase", "database_connected") - await self._load_extensions() - span.set_tag("setup_phase", "extensions_loaded") - await self._load_cogs() - span.set_tag("setup_phase", "cogs_loaded") - await self._setup_hot_reload() - span.set_tag("setup_phase", "hot_reload_ready") - self._start_monitoring() - span.set_tag("setup_phase", "monitoring_started") + for name, step_func in self.setup_steps: + span.set_tag("setup_phase", f"{name}_starting") + if asyncio.iscoroutinefunction(step_func): + await step_func() + else: + step_func() + span.set_tag("setup_phase", f"{name}_finished") except Exception as e: + # If any part of the setup fails, log the critical error + # and initiate a graceful shutdown to prevent a partial startup. 
logger.critical(f"Critical error during setup: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.set_context("setup_failure", {"error": str(e), "error_type": type(e).__name__}) - sentry_sdk.capture_exception(e) + self.sentry_manager.set_context("setup_failure", {"error": str(e), "error_type": type(e).__name__}) + self.sentry_manager.capture_exception(e) await self.shutdown() raise - async def _setup_database(self) -> None: - """Set up and validate the database connection.""" - with start_span("bot.database_connect", "Setting up database connection") as span: - logger.info("Setting up database connection...") - - try: - await db.connect() - self._validate_db_connection() - - span.set_tag("db.connected", db.is_connected()) - span.set_tag("db.registered", db.is_registered()) - - logger.info(f"Database connected: {db.is_connected()}") - logger.info(f"Database models registered: {db.is_registered()}") - - except Exception as e: - span.set_status("internal_error") - span.set_data("error", str(e)) - raise - - async def _load_extensions(self) -> None: - """Load bot extensions and cogs, including Jishaku for debugging.""" - with start_span("bot.load_jishaku", "Loading jishaku debug extension") as span: - try: - await self.load_extension("jishaku") - logger.info("Successfully loaded jishaku extension") - span.set_tag("jishaku.loaded", True) - - except commands.ExtensionError as e: - logger.warning(f"Failed to load jishaku: {e}") - span.set_tag("jishaku.loaded", False) - span.set_data("error", str(e)) - - def _start_monitoring(self) -> None: - """Start the background task monitoring loop.""" - self._monitor_tasks_loop.start() - logger.debug("Task monitoring started") - - @staticmethod - def _validate_db_connection() -> None: - """Raise if the database is not connected or registered.""" - if not db.is_connected() or not db.is_registered(): - raise DatabaseConnectionError(DatabaseConnectionError.CONNECTION_FAILED) + async def shutdown(self) -> None: + """ + Executes the bot's shutdown routine in a defined sequence. + + This method ensures that all resources are cleaned up properly, + including cancelling tasks, stopping task loops, and closing + database and Discord connections. The `is_shutting_down` flag + prevents this from running more than once. + """ + with start_transaction("bot.shutdown", "Bot shutdown process") as transaction: + # The is_shutting_down flag prevents re-entrant calls to shutdown. + if self.state.is_shutting_down: + logger.info("Shutdown already in progress. Exiting.") + transaction.set_data("already_shutting_down", True) + return - def _setup_callback(self, task: asyncio.Task[None]) -> None: - """Handle setup task completion and update setup_complete flag.""" - try: - task.result() - self.setup_complete = True - logger.info("Bot setup completed successfully") + self.state.is_shutting_down = True + transaction.set_tag("shutdown_initiated", True) + logger.info("Shutting down...") - if sentry_sdk.is_initialized(): - sentry_sdk.set_tag("bot.setup_complete", True) + # Iterate through the defined shutdown steps. 
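+            # Each step is a (name, coroutine function) pair, e.g.
+            # ("cleanup_tasks", self.task_manager.cancel_all_tasks), so new
+            # steps can be added in __init__ without touching this loop.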
+ for name, step_func in self.shutdown_steps: + transaction.set_tag(f"{name}_handled", False) + await step_func() + transaction.set_tag(f"{name}_handled", True) - except Exception as e: - logger.critical(f"Setup failed: {e}") - self.setup_complete = False + logger.info("Bot shutdown complete.") - if sentry_sdk.is_initialized(): - sentry_sdk.set_tag("bot.setup_complete", False) - sentry_sdk.set_tag("bot.setup_failed", True) - sentry_sdk.capture_exception(e) + # --- Event Handlers --- async def setup_hook(self) -> None: - """discord.py setup_hook: one-time async setup before connecting to Discord.""" - if not self._emoji_manager_initialized: + """ + Performs one-time async setup before connecting to Discord. + + This `discord.py` hook is used to initialize the emoji manager and + schedule the `_post_ready_startup` task, which runs after both the + internal setup and the Discord connection are fully established. + """ + # Initialize the emoji manager as soon as the bot's loop is running. + if not self.state.emoji_manager_initialized: await self.emoji_manager.init() - self._emoji_manager_initialized = True + self.state.emoji_manager_initialized = True + # The `_post_ready_startup` task should only be created once. + # This check prevents it from being recreated on subsequent reconnects. if self._startup_task is None or self._startup_task.done(): self._startup_task = self.loop.create_task(self._post_ready_startup()) - async def _post_ready_startup(self): - """Run after the bot is fully ready: log banner, set Sentry stats.""" - await self.wait_until_ready() # Wait for Discord connection and READY event - - # Also wait for internal bot setup (cogs, db, etc.) to complete - await self._wait_for_setup() - - if not self.start_time: - self.start_time = discord.utils.utcnow().timestamp() - - if not self._banner_logged: - await self._log_startup_banner() - self._banner_logged = True - - if sentry_sdk.is_initialized(): - sentry_sdk.set_context( - "bot_stats", - { - "guild_count": len(self.guilds), - "user_count": len(self.users), - "channel_count": sum(len(g.channels) for g in self.guilds), - "uptime": discord.utils.utcnow().timestamp() - (self.start_time or 0), - }, - ) - async def on_ready(self) -> None: - """Handle bot ready event.""" + """ + Called when the bot is ready and connected to Discord. + + This sets the bot's presence and indicates that it is online. + It waits for the internal setup to complete before proceeding. 
+ """ await self._wait_for_setup() # Set bot status @@ -199,156 +229,114 @@ async def on_ready(self) -> None: await self.change_presence(activity=activity, status=discord.Status.online) async def on_disconnect(self) -> None: - """Log and report when the bot disconnects from Discord.""" - logger.warning("Bot has disconnected from Discord.") - - if sentry_sdk.is_initialized(): - with sentry_sdk.push_scope() as scope: - scope.set_tag("event_type", "disconnect") - scope.set_level("info") - sentry_sdk.capture_message( - "Bot disconnected from Discord, this happens sometimes and is fine as long as it's not happening too often", - ) - - # --- Sentry Transaction Tracking --- - - def start_interaction_transaction(self, interaction_id: int, name: str) -> Any: - """Start a Sentry transaction for a slash command interaction.""" - if not sentry_sdk.is_initialized(): - return None - - transaction = sentry_sdk.start_transaction( - op="slash_command", - name=f"Slash Command: {name}", - description=f"Processing slash command {name}", - ) - - transaction.set_tag("interaction.id", interaction_id) - transaction.set_tag("command.name", name) - transaction.set_tag("command.type", "slash") - - self.active_sentry_transactions[interaction_id] = transaction + """ + Logs and reports when the bot disconnects from Discord. - return transaction + This is a normal event during bot operation and is usually followed + by a reconnect, so it is logged as a warning. + """ + logger.info("Bot has disconnected from Discord.") - def start_command_transaction(self, message_id: int, name: str) -> Any: - """Start a Sentry transaction for a prefix command.""" - if not sentry_sdk.is_initialized(): - return None - - transaction = sentry_sdk.start_transaction( - op="prefix_command", - name=f"Prefix Command: {name}", - description=f"Processing prefix command {name}", + self.sentry_manager.capture_message( + "Bot disconnected from Discord, this happens sometimes and is fine as long as it's not happening too often", ) - transaction.set_tag("message.id", message_id) - transaction.set_tag("command.name", name) - transaction.set_tag("command.type", "prefix") - - self.active_sentry_transactions[message_id] = transaction - - return transaction + # --- Internal Setup & Shutdown Steps --- - def finish_transaction(self, transaction_id: int, status: str = "ok") -> None: - """Finish a stored Sentry transaction with the given status.""" - if not sentry_sdk.is_initialized(): - return + async def _setup_database(self) -> None: + """ + Connects to the database and validates the connection. + + Raises + ------ + Exception + Propagates any database connection errors from the client. 
+ """ + with start_span("bot.database_connect", "Setting up database connection") as span: + logger.info("Setting up database connection...") - if transaction := self.active_sentry_transactions.pop(transaction_id, None): - transaction.set_status(status) - transaction.finish() + try: + await db.connect() + self._validate_db_connection() - async def _wait_for_setup(self) -> None: - """Wait for setup to complete if not already done.""" - if self.setup_task and not self.setup_task.done(): - with start_span("bot.wait_setup", "Waiting for setup to complete"): - try: - await self.setup_task + span.set_tag("db.connected", db.is_connected()) + span.set_tag("db.registered", db.is_registered()) - except Exception as e: - logger.critical(f"Setup failed during on_ready: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + logger.info(f"Database connected: {db.is_connected()}") + logger.info(f"Database models registered: {db.is_registered()}") - await self.shutdown() + except Exception as e: + span.set_status("internal_error") + span.set_data("error", str(e)) + raise - @tasks.loop(seconds=60) - async def _monitor_tasks_loop(self) -> None: - """Monitor and clean up running tasks every 60 seconds.""" - with start_span("bot.monitor_tasks", "Monitoring async tasks"): + async def _load_jishaku(self) -> None: + """Loads the Jishaku extension for debugging and development.""" + with start_span("bot.load_jishaku", "Loading jishaku debug extension") as span: try: - all_tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()] - tasks_by_type = self._categorize_tasks(all_tasks) - await self._process_finished_tasks(tasks_by_type) + await self.load_extension("jishaku") + logger.info("Successfully loaded jishaku extension") + span.set_tag("jishaku.loaded", True) - except Exception as e: - logger.error(f"Task monitoring failed: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) - - msg = "Critical failure in task monitoring system" - raise RuntimeError(msg) from e - - def _categorize_tasks(self, tasks: list[asyncio.Task[Any]]) -> dict[str, list[asyncio.Task[Any]]]: - """Categorize tasks by their type for monitoring/cleanup.""" - tasks_by_type: dict[str, list[asyncio.Task[Any]]] = { - "SCHEDULED": [], - "GATEWAY": [], - "SYSTEM": [], - "COMMAND": [], - } - - for task in tasks: - if task.done(): - continue - - name = task.get_name() - - if name.startswith("discord-ext-tasks:"): - tasks_by_type["SCHEDULED"].append(task) - elif name.startswith(("discord.py:", "discord-voice-", "discord-gateway-")): - tasks_by_type["GATEWAY"].append(task) - elif "command_" in name.lower(): - tasks_by_type["COMMAND"].append(task) - else: - tasks_by_type["SYSTEM"].append(task) - - return tasks_by_type - - async def _process_finished_tasks(self, tasks_by_type: dict[str, list[asyncio.Task[Any]]]) -> None: - """Process and clean up finished tasks.""" - for task_list in tasks_by_type.values(): - for task in task_list: - if task.done(): - with contextlib.suppress(asyncio.CancelledError): - await task + except commands.ExtensionError as e: + logger.warning(f"Failed to load jishaku: {e}") + span.set_tag("jishaku.loaded", False) + span.set_data("error", str(e)) - async def shutdown(self) -> None: - """Gracefully shut down the bot and clean up resources.""" - with start_transaction("bot.shutdown", "Bot shutdown process") as transaction: - if self.is_shutting_down: - logger.info("Shutdown already in progress. 
Exiting.") - transaction.set_data("already_shutting_down", True) - return + async def _load_cogs(self) -> None: + """ + Loads all command cogs using the CogLoader utility. - self.is_shutting_down = True - transaction.set_tag("shutdown_initiated", True) - logger.info("Shutting down...") + Raises + ------ + Exception + Propagates any exceptions that occur during cog loading. + """ + from tux.cog_loader import CogLoader # noqa: PLC0415 - await self._handle_setup_task() - transaction.set_tag("setup_task_handled", True) + with start_span("bot.load_cogs", "Loading all cogs") as span: + logger.info("Loading cogs...") - await self._cleanup_tasks() - transaction.set_tag("tasks_cleaned", True) + try: + await CogLoader.setup(self) + span.set_tag("cogs_loaded", True) - await self._close_connections() - transaction.set_tag("connections_closed", True) + except Exception as e: + logger.critical(f"Error loading cogs: {e}") + span.set_tag("cogs_loaded", False) + span.set_data("error", str(e)) + self.sentry_manager.capture_exception(e) + raise - logger.info("Bot shutdown complete.") + async def _setup_hot_reload(self) -> None: + """ + Sets up the hot-reload system for development. + + This allows for automatic reloading of cogs and modules when files + are changed, speeding up the development workflow. + + Raises + ------ + Exception + Propagates exceptions from `load_extension` if hot-reload fails. + """ + if not self.state.hot_reload_loaded and "tux.utils.hot_reload" not in self.extensions: + with start_span("bot.setup_hot_reload", "Setting up hot reload system"): + try: + await self.load_extension("tux.utils.hot_reload") + self.state.hot_reload_loaded = True + logger.info("🔥 Hot reload system initialized") + except Exception as e: + logger.error(f"Failed to load hot reload extension: {e}") + self.sentry_manager.capture_exception(e) async def _handle_setup_task(self) -> None: - """Handle setup task during shutdown.""" + """ + Handles the main setup task during shutdown. + + If the bot is shut down while the initial setup is still running, + this method ensures the setup task is properly cancelled. 
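+
+        Notes
+        -----
+        Cancellation follows the standard asyncio idiom (a sketch of the
+        body below)::
+
+            task.cancel()
+            with contextlib.suppress(asyncio.CancelledError):
+                await task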
+ """ with start_span("bot.handle_setup_task", "Handling setup task during shutdown"): if self.setup_task and not self.setup_task.done(): self.setup_task.cancel() @@ -356,132 +344,126 @@ async def _handle_setup_task(self) -> None: with contextlib.suppress(asyncio.CancelledError): await self.setup_task - async def _cleanup_tasks(self) -> None: - """Clean up all running tasks.""" - with start_span("bot.cleanup_tasks", "Cleaning up running tasks"): - try: - await self._stop_task_loops() - - all_tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()] - tasks_by_type = self._categorize_tasks(all_tasks) - - await self._cancel_tasks(tasks_by_type) - - except Exception as e: - logger.error(f"Error during task cleanup: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) - - async def _stop_task_loops(self) -> None: - """Stop all task loops in cogs.""" - with start_span("bot.stop_task_loops", "Stopping task loops"): - for cog_name in self.cogs: - cog = self.get_cog(cog_name) - if not cog: - continue - - for name, value in cog.__dict__.items(): - if isinstance(value, tasks.Loop): - try: - value.stop() - logger.debug(f"Stopped task loop {cog_name}.{name}") - - except Exception as e: - logger.error(f"Error stopping task loop {cog_name}.{name}: {e}") - - if hasattr(self, "_monitor_tasks_loop") and self._monitor_tasks_loop.is_running(): - self._monitor_tasks_loop.stop() - - async def _cancel_tasks(self, tasks_by_type: dict[str, list[asyncio.Task[Any]]]) -> None: - """Cancel tasks by category.""" - with start_span("bot.cancel_tasks", "Cancelling tasks by category") as span: - for task_type, task_list in tasks_by_type.items(): - if not task_list: - continue - - task_names: list[str] = [] - - for t in task_list: - name = t.get_name() or "unnamed" - if name in ("None", "unnamed"): - coro = t.get_coro() - name = getattr(coro, "__qualname__", str(coro)) - task_names.append(name) - names = ", ".join(task_names) - - logger.debug(f"Cancelling {len(task_list)} {task_type}: {names}") - span.set_data(f"tasks.{task_type.lower()}", task_names) - - for task in task_list: - task.cancel() - - results = await asyncio.gather(*task_list, return_exceptions=True) - - for result in results: - if isinstance(result, Exception) and not isinstance(result, asyncio.CancelledError): - logger.error(f"Exception during task cancellation for {task_type}: {result!r}") - - logger.debug(f"Cancelled {task_type}") - async def _close_connections(self) -> None: - """Close Discord and database connections.""" - with start_span("bot.close_connections", "Closing connections") as span: + """Closes Discord and database connections.""" + with start_span("bot.close_connections", "Closing connections"): + await self._close_discord() + await self._close_database() + + async def _close_discord(self) -> None: + """Closes the connection to the Discord Gateway.""" + with start_span("bot.close_discord", "Closing Discord connection") as span: try: - logger.debug("Closing Discord connections.") - + logger.debug("Closing Discord connection.") await self.close() - logger.debug("Discord connections closed.") + logger.debug("Discord connection closed.") span.set_tag("discord_closed", True) - except Exception as e: logger.error(f"Error during Discord shutdown: {e}") - span.set_tag("discord_closed", False) span.set_data("discord_error", str(e)) - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + self.sentry_manager.capture_exception(e) + + async def _close_database(self) -> None: + """Closes the database 
connection pool.""" + with start_span("bot.close_database", "Closing database connection") as span: + if not db.is_connected(): + logger.warning("Database was not connected, no disconnect needed.") + span.set_tag("db_connected", False) + return try: - logger.debug("Closing database connections.") - - if db.is_connected(): - await db.disconnect() - - logger.debug("Database connections closed.") - span.set_tag("db_closed", True) - - else: - logger.warning("Database was not connected, no disconnect needed.") - span.set_tag("db_connected", False) - + logger.debug("Closing database connection.") + await db.disconnect() + logger.debug("Database connection closed.") + span.set_tag("db_closed", True) except Exception as e: logger.critical(f"Error during database disconnection: {e}") span.set_tag("db_closed", False) span.set_data("db_error", str(e)) + self.sentry_manager.capture_exception(e) - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + # --- Internal Helpers --- - async def _load_cogs(self) -> None: - """Load bot cogs using CogLoader.""" - with start_span("bot.load_cogs", "Loading all cogs") as span: - logger.info("Loading cogs...") + def _setup_callback(self, task: asyncio.Task[None]) -> None: + """ + A callback that runs upon completion of the main setup task. - try: - await CogLoader.setup(self) - span.set_tag("cogs_loaded", True) + This updates the bot's state to reflect whether the setup + was successful or failed. - except Exception as e: - logger.critical(f"Error loading cogs: {e}") - span.set_tag("cogs_loaded", False) - span.set_data("error", str(e)) + Parameters + ---------- + task : asyncio.Task[None] + The setup task that has completed. + """ + try: + task.result() + self.state.setup_complete = True + logger.info("Bot setup completed successfully") + self.sentry_manager.set_tag("bot.setup_complete", True) - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) - raise + except Exception as e: + logger.critical(f"Setup failed: {e}") + self.state.setup_complete = False + + self.sentry_manager.set_tag("bot.setup_complete", False) + self.sentry_manager.set_tag("bot.setup_failed", True) + self.sentry_manager.capture_exception(e) + + async def _wait_for_setup(self) -> None: + """ + Waits for the internal setup task to complete before proceeding. + + This is a crucial step in event handlers like `on_ready` to ensure + that cogs, database connections, etc., are available before the bot + tries to interact with them. + """ + if self.setup_task and not self.setup_task.done(): + with start_span("bot.wait_setup", "Waiting for setup to complete"): + try: + await self.setup_task + + except Exception as e: + logger.critical(f"Setup failed during on_ready: {e}") + self.sentry_manager.capture_exception(e) + + await self.shutdown() + + async def _post_ready_startup(self): + """ + Runs tasks that require the bot to be fully online and ready. + + This method waits for two conditions: + 1. The bot is connected to the Discord Gateway (`wait_until_ready`). + 2. The bot has completed its own internal setup (`_wait_for_setup`). + + Once ready, it logs the startup banner and reports initial stats. + """ + await self.wait_until_ready() + + # Also wait for internal bot setup (cogs, db, etc.) 
to complete + await self._wait_for_setup() + + if not self.state.start_time: + self.state.start_time = discord.utils.utcnow().timestamp() + + if not self.state.banner_logged: + await self._log_startup_banner() + self.state.banner_logged = True + + self.sentry_manager.set_context( + "bot_stats", + { + "guild_count": len(self.guilds), + "user_count": len(self.users), + "channel_count": sum(len(g.channels) for g in self.guilds), + "uptime": discord.utils.utcnow().timestamp() - (self.state.start_time or 0), + }, + ) async def _log_startup_banner(self) -> None: - """Log bot startup information (banner, stats, etc.).""" + """Logs the bot's startup banner and stats to the console.""" with start_span("bot.log_banner", "Displaying startup banner"): banner = create_banner( bot_name=Config.BOT_NAME, @@ -493,17 +475,86 @@ async def _log_startup_banner(self) -> None: dev_mode=is_dev_mode(), ) - console.print(banner) + self.console.print(banner) - async def _setup_hot_reload(self) -> None: - """Set up hot reload system after all cogs are loaded.""" - if not self._hot_reload_loaded and "tux.utils.hot_reload" not in self.extensions: - with start_span("bot.setup_hot_reload", "Setting up hot reload system"): - try: - await self.load_extension("tux.utils.hot_reload") - self._hot_reload_loaded = True - logger.info("🔥 Hot reload system initialized") - except Exception as e: - logger.error(f"Failed to load hot reload extension: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + @staticmethod + def _validate_db_connection() -> None: + """ + Ensures the database is properly connected. + + Raises + ------ + DatabaseConnectionError + If the database is not connected or models are not registered. + """ + if not db.is_connected() or not db.is_registered(): + raise DatabaseConnectionError(DatabaseConnectionError.CONNECTION_FAILED) + + async def _get_prefix(self, bot: Tux, message: discord.Message) -> list[str]: + """ + Resolves the command prefix for a given message with caching. + + This method dynamically retrieves the command prefix for a guild, caching + the result to avoid repeated database lookups. It falls back to the + default prefix if one is not configured or if a database error occurs. + It also allows the bot to be invoked by mentioning it. + + Parameters + ---------- + bot : Tux + The instance of the bot. + message : discord.Message + The message to resolve the prefix for. + + Returns + ------- + list[str] + A list of command prefixes, including mentions. + """ + if not message.guild: + return commands.when_mentioned_or(CONFIG.DEFAULT_PREFIX)(self, message) + + # Check the cache for a stored prefix + if cached_prefix := self.prefix_cache.get(message.guild.id): + return commands.when_mentioned_or(cached_prefix)(self, message) + + # If not in cache, query the database + if db.is_connected(): + try: + if prefix := await DatabaseController().guild_config.get_guild_prefix(message.guild.id): + self.prefix_cache[message.guild.id] = prefix + return commands.when_mentioned_or(prefix)(self, message) + except Exception as e: + logger.error(f"Error getting guild prefix for guild {message.guild.id}: {e}") + self.sentry_manager.capture_exception(e) + + # Fallback to the default prefix if no custom one is found + return commands.when_mentioned_or(CONFIG.DEFAULT_PREFIX)(self, message) + + async def invoke(self, ctx: commands.Context[Any]) -> None: + """ + Overrides the default invoke method to wrap command execution in a Sentry transaction. 
+ + This ensures that every command invocation is traced, allowing for performance + monitoring and capturing of related spans (e.g., database queries). + + Parameters + ---------- + ctx : commands.Context[Any] + The context of the command invocation. + """ + if not self.sentry_manager.is_initialized or not ctx.command: + await super().invoke(ctx) + return + + # Create a transaction for the command + op = "command" + name = ctx.command.qualified_name + description = ctx.message.content + + with start_transaction(op, name, description): + # Set comprehensive context using the SentryManager + self.sentry_manager.set_command_context(ctx) + + # Execute the original command invocation logic + await super().invoke(ctx) diff --git a/tux/cog_loader.py b/tux/cog_loader.py index b54e4195d..c5e4b5c24 100644 --- a/tux/cog_loader.py +++ b/tux/cog_loader.py @@ -1,39 +1,92 @@ +""" +CogLoader: A robust cog loader for the Tux bot. + +This module provides the `CogLoader` class, which is responsible for discovering, +loading, and managing all cogs (Discord.py extensions) for the bot. It includes +features for priority-based loading, performance tracking, and detailed error +reporting with Sentry integration. +""" + +from __future__ import annotations + import asyncio import time -import traceback from collections import defaultdict -from collections.abc import Sequence +from itertools import groupby from pathlib import Path -import aiofiles -import aiofiles.os -import sentry_sdk from discord.ext import commands from loguru import logger from tux.utils.config import CONFIG -from tux.utils.sentry import safe_set_name, span, start_span, transaction +from tux.utils.tracing import ( + capture_span_exception, + enhanced_span, + set_span_attributes, + span, + transaction, +) class CogLoadError(Exception): """Raised when a cog fails to load.""" - FAILED_TO_LOAD = "Failed to load cogs" - FAILED_TO_LOAD_FOLDER = "Failed to load cogs from folder" - FAILED_TO_INITIALIZE = "Failed to initialize cog loader" - def __init__(self, message: str) -> None: - self.message = message - super().__init__(self.message) +class CogLoadResult: + """ + Encapsulates the result of a cog loading operation. + + Attributes + ---------- + module : str + The full import path of the cog. + success : bool + Whether the cog loaded successfully. + load_time : float + The time taken to load the cog, in seconds. + error : Exception | None + The exception raised during loading, if any. + """ + + def __init__(self, module: str, success: bool, load_time: float, error: Exception | None = None) -> None: + self.module = module + self.success = success + self.load_time = load_time + self.error = error + + @property + def load_time_ms(self) -> float: + """ + Return the cog load time in milliseconds. + + Returns + ------- + float + The load time in milliseconds. + """ + return self.load_time * 1000 class CogLoader(commands.Cog): + """ + A robust cog loader with priority-based loading, performance tracking, + and detailed Sentry integration. + """ + + # --- Initialization --- + def __init__(self, bot: commands.Bot) -> None: + """ + Initializes the CogLoader. + + Parameters + ---------- + bot : commands.Bot + The bot instance. 
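+
+        Notes
+        -----
+        Load order is governed by ``load_priorities`` (higher values load
+        first), e.g. ``"services": 90`` ahead of ``"admin": 80``.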
+ """ self.bot = bot - self.cog_ignore_list: set[str] = CONFIG.COG_IGNORE_LIST - # Track load times for performance monitoring + self.cog_ignore_list: set[str] = set(CONFIG.COG_IGNORE_LIST) self.load_times: defaultdict[str, float] = defaultdict(float) - # Define load order priorities (higher number = higher priority) self.load_priorities = { "services": 90, "admin": 80, @@ -47,9 +100,31 @@ def __init__(self, bot: commands.Bot) -> None: "tools": 5, } - async def is_cog_eligible(self, filepath: Path) -> bool: + # --- Cog Discovery & Metadata --- + + @staticmethod + def _path_to_module(path: Path) -> str: """ - Checks if the specified file is an eligible cog. + Converts a Path object to a Python module path. + + Example: + tux/cogs/admin.py -> tux.cogs.admin + + Parameters + ---------- + path : Path + The file path to convert. + + Returns + ------- + str + The Python module import path. + """ + return ".".join(path.parts).removesuffix(".py") + + def _is_eligible_cog_file(self, filepath: Path) -> bool: + """ + Checks if a file is an eligible cog for loading. Parameters ---------- @@ -61,23 +136,98 @@ async def is_cog_eligible(self, filepath: Path) -> bool: bool True if the file is an eligible cog, False otherwise. """ - cog_name: str = filepath.stem + if filepath.suffix != ".py" or not filepath.is_file() or filepath.stem.startswith("_"): + return False + cog_name = filepath.stem if cog_name in self.cog_ignore_list: - logger.warning(f"Skipping {cog_name} as it is in the ignore list.") + logger.trace(f"Skipping {cog_name} as it is in the ignore list.") return False - return filepath.suffix == ".py" and not cog_name.startswith("_") and await aiofiles.os.path.isfile(filepath) + return True + + def _get_cog_priority(self, path: Path) -> int: + """ + Gets the loading priority for a cog based on its parent directory. + + Parameters + ---------- + path : Path + The path to the cog file. + + Returns + ------- + int + The priority value, or 0 if not specified. + """ + return self.load_priorities.get(path.parent.name, 0) + + def _discover_and_sort_cogs(self, path: Path) -> list[Path]: + """ + Discovers all eligible cogs in a directory and sorts them by priority. + + Parameters + ---------- + path : Path + The directory to search for cogs. + + Returns + ------- + list[Path] + A list of cog file paths, sorted by priority (descending). + """ + if not path.is_dir(): + return [] + + eligible_cogs = [f for f in path.rglob("*.py") if self._is_eligible_cog_file(f)] + return sorted(eligible_cogs, key=lambda p: (self._get_cog_priority(p), p.name), reverse=True) + + def _create_load_result( + self, + path: Path, + start_time: float, + success: bool = True, + error: Exception | None = None, + ) -> CogLoadResult: + """ + Creates a standardized CogLoadResult object. + + Parameters + ---------- + path : Path + The path to the cog file. + start_time : float + The time when the loading process started. + success : bool, optional + Whether the load was successful, by default True. + error : Exception | None, optional + The error that occurred, if any, by default None. + + Returns + ------- + CogLoadResult + The result object. + """ + module = self._path_to_module(path) + load_time = time.perf_counter() - start_time + return CogLoadResult(module, success, load_time, error) + + # --- Cog Operations --- @span("cog.load_single") - async def _load_single_cog(self, path: Path) -> None: + async def _load_single_cog(self, path: Path) -> CogLoadResult: """ - Load a single cog with timing and error tracking. 
+ Loads a single cog with comprehensive error handling and timing. Parameters ---------- path : Path - The path to the cog to load. + The path to the cog file to load. + + Returns + ------- + CogLoadResult + The result of the loading operation. Raises ------ @@ -85,292 +235,254 @@ async def _load_single_cog(self, path: Path) -> None: If the cog fails to load. """ start_time = time.perf_counter() - - # Setup for Sentry tracing + module = self._path_to_module(path) cog_name = path.stem - # Add span tags for the current cog - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_tag("cog.name", cog_name) - current_span.set_tag("cog.path", str(path)) + set_span_attributes({"cog.name": cog_name, "cog.path": str(path), "cog.module": module}) try: - # Get the path relative to the tux package - relative_path = path.relative_to(Path(__file__).parent) - - # Convert path to module format (e.g., tux.cogs.admin.dev) - module = f"tux.{str(relative_path).replace('/', '.').replace('\\', '.')[:-3]}" - - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_tag("cog.module", module) - - # Check if this module or any parent module is already loaded - # This prevents duplicate loading of the same module - module_parts = module.split(".") - - for i in range(len(module_parts), 1, -1): - check_module = ".".join(module_parts[:i]) - if check_module in self.bot.extensions: - logger.warning(f"Skipping {module} as {check_module} is already loaded") - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_tag("cog.status", "skipped") - current_span.set_tag("cog.skip_reason", "already_loaded") - current_span.set_data("already_loaded_module", check_module) - return - - # Actually load the extension - await self.bot.load_extension(name=module) - load_time = time.perf_counter() - start_time - self.load_times[module] = load_time - - # Add telemetry data to span - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_tag("cog.status", "loaded") - current_span.set_data("load_time_ms", load_time * 1000) - current_span.set_data("load_time_s", load_time) - - logger.debug(f"Successfully loaded cog {module} in {load_time * 1000:.0f}ms") - + await self.bot.load_extension(module) except Exception as e: - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_status("internal_error") - current_span.set_tag("cog.status", "failed") - current_span.set_data("error", str(e)) - current_span.set_data("traceback", traceback.format_exc()) - - module_name = str(path) - error_msg = f"Failed to load cog {module_name}. Error: {e}\n{traceback.format_exc()}" - logger.error(error_msg) + result = self._create_load_result(path, start_time, success=False, error=e) + capture_span_exception(e, cog_status="failed", cog_name=cog_name, cog_module=module) + error_msg = f"Failed to load cog {module}." 
+ logger.error(f"{error_msg} Error: {e}") raise CogLoadError(error_msg) from e - - def _get_cog_priority(self, path: Path) -> int: + else: + result = self._create_load_result(path, start_time) + self.load_times[module] = result.load_time + set_span_attributes({"cog.status": "loaded", "load_time_ms": result.load_time_ms}) + logger.debug(f"Successfully loaded cog {module} in {result.load_time_ms:.2f}ms") + return result + + @span("cog.unload_single") + async def _unload_single_cog(self, path: Path) -> bool: """ - Get the loading priority for a cog based on its category. + Unloads a single cog with enhanced tracing. Parameters ---------- path : Path - The path to the cog. + The path to the cog file to unload. Returns ------- - int - The priority value (higher = loaded earlier) + bool + True if the cog was unloaded successfully, False otherwise. """ - return self.load_priorities.get(path.parent.name, 0) + module = self._path_to_module(path) + set_span_attributes({"cog.module": module}) - @span("cog.load_group") - async def _load_cog_group(self, cogs: Sequence[Path]) -> None: + try: + await self.bot.unload_extension(module) + except commands.ExtensionNotLoaded: + logger.warning(f"Cog {module} is not loaded, cannot unload.") + return False + except Exception as e: + capture_span_exception(e, operation="unload", cog_module=module) + logger.error(f"Failed to unload cog {module}: {e}") + return False + else: + logger.info(f"Successfully unloaded cog: {module}") + return True + + @span("cog.reload_single") + async def reload_cog(self, path: Path) -> bool: """ - Load a group of cogs concurrently. + Reloads a single cog with comprehensive error handling. Parameters ---------- - cogs : Sequence[Path] - The cogs to load. - """ - if not cogs: - return + path : Path + The path to the cog file to reload. - # Add basic info for the group - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_data("cog_count", len(cogs)) + Returns + ------- + bool + True if the cog was reloaded successfully, False otherwise. 
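+
+        Examples
+        --------
+        A sketch, where ``loader`` is a ``CogLoader`` instance and the
+        path is illustrative::
+
+            ok = await loader.reload_cog(Path("tux/cogs/utility/ping.py"))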
+ """ + module = self._path_to_module(path) + set_span_attributes({"cog.module": module}) - if categories := {cog.parent.name for cog in cogs if cog.parent}: - current_span.set_data("categories", list(categories)) + await self._unload_single_cog(path) - # Track cog group loading - start_time = time.perf_counter() - results = await asyncio.gather(*[self._load_single_cog(cog) for cog in cogs], return_exceptions=True) - end_time = time.perf_counter() - - # Calculate success/failure rates - success_count = len([r for r in results if not isinstance(r, Exception)]) - failure_count = len(results) - success_count - - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_data("load_time_s", end_time - start_time) - current_span.set_data("success_count", success_count) - current_span.set_data("failure_count", failure_count) - - # Log failures with proper context - for result, cog in zip(results, cogs, strict=False): - if isinstance(result, Exception): - logger.error(f"Error loading {cog}: {result}") - - async def _process_single_file(self, path: Path) -> None: - """Process a single file path.""" - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_tag("path.is_dir", False) - if await self.is_cog_eligible(path): + try: await self._load_single_cog(path) + except CogLoadError: + return False + else: + logger.info(f"Successfully reloaded cog: {module}") + return True - async def _process_directory(self, path: Path) -> None: - """Process a directory of cogs.""" - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_tag("path.is_dir", True) - - # Collect and sort eligible cogs by priority - cog_paths: list[tuple[int, Path]] = [ - (self._get_cog_priority(item), item) for item in path.rglob("*.py") if await self.is_cog_eligible(item) - ] - cog_paths.sort(key=lambda x: x[0], reverse=True) - - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_data("eligible_cog_count", len(cog_paths)) - - # Priority groups info for observability - priority_groups: dict[int, int] = {} - for priority, _ in cog_paths: - if priority in priority_groups: - priority_groups[priority] += 1 - else: - priority_groups[priority] = 1 - current_span.set_data("priority_groups", priority_groups) - - # Group and load cogs by priority - current_group: list[Path] = [] - current_priority: int | None = None - - for priority, cog_path in cog_paths: - if current_priority != priority and current_group: - await self._load_cog_group(current_group) - current_group = [] - current_priority = priority - current_group.append(cog_path) - - # Load final group - if current_group: - await self._load_cog_group(current_group) + # --- Loading Workflow --- - @span("cog.load_path") - async def load_cogs(self, path: Path) -> None: + @span("cog.load_directory") + async def _load_cogs_from_directory(self, path: Path) -> list[CogLoadResult]: """ - Recursively loads eligible cogs from the specified directory with concurrent loading. + Discovers, groups, and loads all eligible cogs from a directory. Parameters ---------- path : Path - The path to the directory containing cogs. 
- """ - # Add span context - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_tag("cog.path", str(path)) - - try: - # Handle file vs directory paths differently - if not await aiofiles.os.path.isdir(path): - await self._process_single_file(path) - else: - await self._process_directory(path) - - except Exception as e: - path_str = path.as_posix() - logger.error(f"An error occurred while processing {path_str}: {e}") - - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_status("internal_error") - current_span.set_data("error", str(e)) - current_span.set_data("traceback", traceback.format_exc()) + The directory to load cogs from. - raise CogLoadError(CogLoadError.FAILED_TO_LOAD) from e + Returns + ------- + list[CogLoadResult] + A list of results for each cog loaded. + """ + eligible_cogs = self._discover_and_sort_cogs(path) + if not eligible_cogs: + return [] + + set_span_attributes({"eligible_cog_count": len(eligible_cogs)}) + + all_results: list[CogLoadResult] = [] + cogs_by_priority = groupby(eligible_cogs, key=self._get_cog_priority) + + for priority, cogs in cogs_by_priority: + cogs_to_load = list(cogs) + with enhanced_span("cog.load_priority_group", f"Loading priority {priority} cogs", priority=priority): + categories = {cog.parent.name for cog in cogs_to_load} + set_span_attributes({"cog_count": len(cogs_to_load), "categories": list(categories)}) + + start_time = time.perf_counter() + results = await asyncio.gather( + *[self._load_single_cog(cog) for cog in cogs_to_load], + return_exceptions=True, + ) + group_results = [result for result in results if isinstance(result, CogLoadResult)] + all_results.extend(group_results) + + set_span_attributes( + { + "load_time_s": time.perf_counter() - start_time, + "success_count": len(group_results), + "failure_count": len(results) - len(group_results), + }, + ) + return all_results - @transaction("cog.load_folder", description="Loading all cogs from folder") - async def load_cogs_from_folder(self, folder_name: str) -> None: + @span("cog.load_path") + async def load_cogs(self, path: Path) -> list[CogLoadResult]: """ - Loads cogs from the specified folder with timing. + Recursively loads eligible cogs from a directory or a single file. Parameters ---------- - folder_name : str - The name of the folder containing the cogs. - """ - # Add span info - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_tag("cog.folder", folder_name) - # Use safe_set_name instead of direct set_name call - safe_set_name(current_span, f"Load Cogs: {folder_name}") - - start_time = time.perf_counter() - cog_path: Path = Path(__file__).parent / folder_name - - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_data("full_path", str(cog_path)) - - try: - await self.load_cogs(path=cog_path) - load_time = time.perf_counter() - start_time + path : Path + The path to the file or directory to load cogs from. - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_data("load_time_s", load_time) - current_span.set_data("load_time_ms", load_time * 1000) + Returns + ------- + list[CogLoadResult] + A list of results for each cog loaded. - if load_time: - logger.info(f"Loaded all cogs from {folder_name} in {load_time * 1000:.0f}ms") + Raises + ------ + CogLoadError + If a fatal error occurs during the loading process. 
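+
+        Examples
+        --------
+        Load a directory tree and collect failures (a sketch; ``loader``
+        is a ``CogLoader`` instance)::
+
+            results = await loader.load_cogs(Path("tux/cogs"))
+            failed = [r.module for r in results if not r.success]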
+ """ + set_span_attributes({"cog.path": str(path)}) - # Log individual cog load times for performance monitoring - slow_threshold = 1.0 # seconds - if slow_cogs := {k: v for k, v in self.load_times.items() if v > slow_threshold}: - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_data("slow_cogs", slow_cogs) - logger.warning(f"Slow loading cogs (>{slow_threshold * 1000:.0f}ms): {slow_cogs}") + if not path.exists(): + logger.warning(f"Cog path not found: {path}") + return [] + try: + if path.is_dir(): + return await self._load_cogs_from_directory(path) + if self._is_eligible_cog_file(path): + return [await self._load_single_cog(path)] except Exception as e: - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_status("internal_error") - current_span.set_data("error", str(e)) - current_span.set_data("traceback", traceback.format_exc()) + capture_span_exception(e, path=str(path), operation="load_cogs") + logger.error(f"An error occurred while processing {path.as_posix()}: {e}") + msg = f"Failed to load from {path.as_posix()}" + raise CogLoadError(msg) from e - logger.error(f"Failed to load cogs from folder {folder_name}: {e}") - raise CogLoadError(CogLoadError.FAILED_TO_LOAD_FOLDER) from e + logger.debug(f"Path {path} is not an eligible cog file or directory.") + return [] - @classmethod - @transaction("cog.setup", name="CogLoader Setup", description="Initialize CogLoader and load all cogs") - async def setup(cls, bot: commands.Bot) -> None: + @transaction("cog.load_folder", description="Loading all cogs from a folder") + async def load_cogs_from_folder(self, folder_name: str) -> list[CogLoadResult]: """ - Set up the cog loader and load all cogs. + Loads all cogs from a specified top-level folder. Parameters ---------- - bot : commands.Bot - The bot instance. - """ - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_tag("bot.id", bot.user.id if bot.user else "unknown") + folder_name : str + The name of the folder to load cogs from (e.g., "tux/cogs"). - start_time = time.perf_counter() - cog_loader = cls(bot) + Returns + ------- + list[CogLoadResult] + A list of results for each cog loaded. - try: - # Load handlers first (they have highest priority) - with start_span("cog.load_handlers", "Load handler cogs"): - await cog_loader.load_cogs_from_folder(folder_name="handlers") + Raises + ------ + CogLoadError + Propagates errors from the underlying `load_cogs` call. 
+ """ + cog_path = Path(folder_name) + with enhanced_span("cog.folder_processing", f"Processing {folder_name}", folder=folder_name): + start_time = time.perf_counter() + try: + results = await self.load_cogs(path=cog_path) + except CogLoadError as e: + capture_span_exception(e, folder=folder_name, operation="load_folder") + logger.error(f"Failed to load cogs from folder {folder_name}: {e}") + raise + else: + load_time = time.perf_counter() - start_time + success_count = sum(r.success for r in results) + logger.info( + f"Loaded {success_count}/{len(results)} cogs from {folder_name} in {load_time * 1000:.2f}ms", + ) + return results - # Then load regular cogs - with start_span("cog.load_regular", "Load regular cogs"): - await cog_loader.load_cogs_from_folder(folder_name="cogs") + # --- Setup --- - # Finally, load cogs from the extensions folder - with start_span("cog.load_extensions", "Load extension cogs"): - await cog_loader.load_cogs_from_folder(folder_name="extensions") + @classmethod + @transaction("cog.setup", name="CogLoader Setup", description="Initialize and load all cogs") + async def setup(cls, bot: commands.Bot) -> CogLoader: + """ + Sets up the cog loader and loads all initial cogs for the bot. - total_time = time.perf_counter() - start_time + Parameters + ---------- + bot : commands.Bot + The bot instance to set up. - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_data("total_load_time_s", total_time) - current_span.set_data("total_load_time_ms", total_time * 1000) + Returns + ------- + CogLoader + The initialized CogLoader instance. - # Add the CogLoader itself as a cog for bot maintenance - with start_span("cog.register_loader", "Register CogLoader cog"): + Raises + ------ + CogLoadError + If a fatal error occurs during the setup process. 
+ """ + with enhanced_span("cog.loader_init", "Initializing CogLoader", bot_id=bot.user.id if bot.user else "unknown"): + start_time = time.perf_counter() + cog_loader = cls(bot) + cog_folders = ["tux/handlers", "tux/cogs", "tux/extensions"] + try: + all_results: list[CogLoadResult] = [] + for folder in cog_folders: + folder_results = await cog_loader.load_cogs_from_folder(folder_name=folder) + all_results.extend(folder_results) + total_time = time.perf_counter() - start_time + total_cogs = len(all_results) + successful_cogs = sum(r.success for r in all_results) + logger.info( + f"Cog loading complete: {successful_cogs}/{total_cogs} cogs loaded in {total_time * 1000:.2f}ms", + ) + except Exception as e: + capture_span_exception(e, operation="cog_setup") + logger.opt(exception=e).critical("Failed to set up cog loader.") + msg = "Failed to initialize CogLoader" + raise CogLoadError(msg) from e + else: await bot.add_cog(cog_loader) - - logger.info(f"Total cog loading time: {total_time * 1000:.0f}ms") - - except Exception as e: - if sentry_sdk.is_initialized() and (current_span := sentry_sdk.get_current_span()): - current_span.set_status("internal_error") - current_span.set_data("error", str(e)) - current_span.set_data("traceback", traceback.format_exc()) - - logger.error(f"Failed to set up cog loader: {e}") - raise CogLoadError(CogLoadError.FAILED_TO_INITIALIZE) from e + return cog_loader diff --git a/tux/database/controllers/__init__.py b/tux/database/controllers/__init__.py index 445c4c84f..c09c08c9f 100644 --- a/tux/database/controllers/__init__.py +++ b/tux/database/controllers/__init__.py @@ -1,11 +1,7 @@ """Database controller module providing access to all model controllers.""" -import functools -import inspect from typing import Any, ClassVar, TypeVar -import sentry_sdk - from tux.database.controllers.afk import AfkController from tux.database.controllers.case import CaseController from tux.database.controllers.guild import GuildController @@ -15,6 +11,7 @@ from tux.database.controllers.reminder import ReminderController from tux.database.controllers.snippet import SnippetController from tux.database.controllers.starboard import StarboardController, StarboardMessageController +from tux.utils.tracing import span # Define a TypeVar that can be any BaseController subclass ControllerType = TypeVar("ControllerType") @@ -67,7 +64,9 @@ def __init__(self) -> None: def _get_controller(self, controller_type: type[ControllerType]) -> ControllerType: """ - Helper method to instantiate a controller with proper Sentry instrumentation. + Helper method to instantiate a controller with selective Sentry instrumentation. + + Only instruments meaningful database operations to reduce span noise. 
Parameters ---------- @@ -77,83 +76,62 @@ def _get_controller(self, controller_type: type[ControllerType]) -> ControllerTy Returns ------- ControllerType - The instantiated controller + The instantiated controller with selectively instrumented methods """ instance = controller_type() - if sentry_sdk.is_initialized(): - # Get all public methods to wrap - methods = [attr for attr in dir(instance) if callable(getattr(instance, attr)) and not attr.startswith("_")] - # Wrap each public method with Sentry transaction - for method_name in methods: + # Only instrument meaningful database operations + # Filter out utility methods that create noise + excluded_methods = { + "safe_get_attr", + "connect_or_create_relation", + "_add_include_arg_if_present", + "_build_find_args", + "_build_simple_args", + "_build_create_args", + "_build_update_args", + "_build_delete_args", + "_build_upsert_args", + "_execute_query", + "_set_scope_context", + } + + # Include patterns for meaningful operations + include_patterns = { + "get_", + "find_", + "create_", + "update_", + "delete_", + "count_", + "increment_", + "toggle_", + "lock_", + "unlock_", + "bulk_", + } + + # Get all public methods to potentially wrap + methods = [ + attr + for attr in dir(instance) + if callable(getattr(instance, attr)) and not attr.startswith("_") and attr not in excluded_methods + ] + + # Wrap only methods that match meaningful operation patterns + for method_name in methods: + if any(method_name.startswith(pattern) for pattern in include_patterns): original_method = getattr(instance, method_name) - # Use a factory function to capture loop variables - self._create_wrapped_method(instance, method_name, original_method) - return instance + # Create the operation name for the span + op = f"db.controller.{method_name}" - def _create_wrapped_method(self, instance: Any, method_name: str, original_method: Any) -> None: - """ - Create a wrapped method with proper sentry instrumentation. 
+ # Apply the span decorator to the original method + wrapped_method = span(op=op)(original_method) - Parameters - ---------- - instance : Any - The controller instance - method_name : str - The name of the method to wrap - original_method : Any - The original method to wrap - """ + setattr(instance, method_name, wrapped_method) - # Check if the original method is async - is_async = inspect.iscoroutinefunction(original_method) - - if is_async: - - @functools.wraps(original_method) - async def async_wrapped_method(*args: Any, **kwargs: Any) -> Any: - controller_name = instance.__class__.__name__ - with sentry_sdk.start_span( - op=f"db.controller.{method_name}", - description=f"{controller_name}.{method_name}", - ) as span: - span.set_tag("db.controller", controller_name) - span.set_tag("db.operation", method_name) - try: - result = await original_method(*args, **kwargs) - except Exception as e: - span.set_status("internal_error") - span.set_data("error", str(e)) - raise - else: - span.set_status("ok") - return result - - setattr(instance, method_name, async_wrapped_method) - - else: - - @functools.wraps(original_method) - def sync_wrapped_method(*args: Any, **kwargs: Any) -> Any: - controller_name = instance.__class__.__name__ - with sentry_sdk.start_span( - op=f"db.controller.{method_name}", - description=f"{controller_name}.{method_name}", - ) as span: - span.set_tag("db.controller", controller_name) - span.set_tag("db.operation", method_name) - try: - result = original_method(*args, **kwargs) - except Exception as e: - span.set_status("internal_error") - span.set_data("error", str(e)) - raise - else: - span.set_status("ok") - return result - - setattr(instance, method_name, sync_wrapped_method) + return instance _controller_mapping: ClassVar[dict[str, type]] = { "afk": AfkController, diff --git a/tux/database/controllers/base.py b/tux/database/controllers/base.py index f407e480d..24d7a909e 100644 --- a/tux/database/controllers/base.py +++ b/tux/database/controllers/base.py @@ -3,7 +3,6 @@ from collections.abc import Callable from typing import Any, TypeVar -import sentry_sdk from loguru import logger from prisma.models import ( @@ -84,6 +83,7 @@ async def _execute_query( self, operation: Callable[[], Any], error_msg: str, + op_name: str, ) -> Any: """Executes a database query with standardized error logging. @@ -96,6 +96,8 @@ async def _execute_query( A zero-argument function (e.g., a lambda) that performs the database call. error_msg : str The base error message to log if an exception occurs. + op_name : str + The name of the database operation (e.g., 'find_one', 'create'). Returns ------- @@ -107,25 +109,14 @@ async def _execute_query( Exception Re-raises any exception caught during the database operation. 
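Stepping back to the controller factory: the prefix-based wrapping that replaces the hand-rolled async/sync wrappers above is worth seeing in isolation. A simplified sketch, assuming an async-only `span` stand-in that just logs (the real decorator comes from `tux.utils.tracing` and handles sync and async alike; the exclusion list is omitted here):

```python
import asyncio
import functools
from typing import Any


def span(op: str):  # simplified stand-in for tux.utils.tracing.span
    def decorator(func):
        @functools.wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            print(f"[span start] {op}")  # the real decorator opens a Sentry span
            try:
                return await func(*args, **kwargs)
            finally:
                print(f"[span end] {op}")
        return wrapper
    return decorator


INCLUDE_PATTERNS = ("get_", "find_", "create_", "update_", "delete_", "count_")


def instrument(instance: Any) -> Any:
    """Wrap only public methods whose names mark meaningful DB operations."""
    for name in dir(instance):
        if name.startswith("_") or not name.startswith(INCLUDE_PATTERNS):
            continue  # utility methods and dunders stay unwrapped
        method = getattr(instance, name)
        if callable(method):
            setattr(instance, name, span(op=f"db.controller.{name}")(method))
    return instance


class DemoController:
    async def get_xp(self) -> int:
        return 42

    async def refresh_cache(self) -> None:  # no matching prefix, left unwrapped
        pass


print(asyncio.run(instrument(DemoController()).get_xp()))
```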
""" - # Create a Sentry span to track database query performance - if sentry_sdk.is_initialized(): - with sentry_sdk.start_span(op="db.query", description=f"Database query: {self.table_name}") as span: - span.set_tag("db.table", self.table_name) - try: - result = await operation() - span.set_status("ok") - return result # noqa: TRY300 - except Exception as e: - span.set_status("internal_error") - span.set_data("error", str(e)) - logger.error(f"{error_msg}: {e}") - raise - else: - try: - return await operation() - except Exception as e: - logger.error(f"{error_msg}: {e}") - raise + # Remove span creation here to avoid duplication with controller-level spans + # Controller methods should handle their own tracing for meaningful operations + try: + result = await operation() + return result + except Exception as e: + logger.error(f"{error_msg}: {e}") + raise def _add_include_arg_if_present(self, args: dict[str, Any], include: dict[str, bool] | None) -> None: """Adds the 'include' argument to a dictionary if it is not None.""" @@ -238,6 +229,7 @@ async def find_one( return await self._execute_query( lambda: self.table.find_first(**find_args), f"Failed to find record in {self.table_name} with criteria {where}", + "find_one", ) async def find_unique( @@ -263,6 +255,7 @@ async def find_unique( return await self._execute_query( lambda: self.table.find_unique(**find_args), f"Failed to find unique record in {self.table_name} with criteria {where}", + "find_unique", ) async def find_many( @@ -307,6 +300,7 @@ async def find_many( return await self._execute_query( lambda: self.table.find_many(**find_args), f"Failed to find records in {self.table_name} with criteria {where}", + "find_many", ) async def count( @@ -328,6 +322,7 @@ async def count( return await self._execute_query( lambda: self.table.count(where=where), f"Failed to count records in {self.table_name} with criteria {where}", + "count", ) async def create( @@ -353,6 +348,7 @@ async def create( return await self._execute_query( lambda: self.table.create(**create_args), f"Failed to create record in {self.table_name} with data {data}", + "create", ) async def update( @@ -381,6 +377,7 @@ async def update( return await self._execute_query( lambda: self.table.update(**update_args), f"Failed to update record in {self.table_name} with criteria {where} and data {data}", + "update", ) async def delete( @@ -406,6 +403,7 @@ async def delete( return await self._execute_query( lambda: self.table.delete(**delete_args), f"Failed to delete record in {self.table_name} with criteria {where}", + "delete", ) async def upsert( @@ -437,6 +435,7 @@ async def upsert( return await self._execute_query( lambda: self.table.upsert(**upsert_args), f"Failed to upsert record in {self.table_name} with where={where}, create={create}, update={update}", + "upsert", ) async def update_many( @@ -466,6 +465,7 @@ async def update_many( result = await self._execute_query( lambda: self.table.update_many(where=where, data=data), f"Failed to update records in {self.table_name} with criteria {where} and data {data}", + "update_many", ) # Validate and return count count_val = getattr(result, "count", None) @@ -498,6 +498,7 @@ async def delete_many( result = await self._execute_query( lambda: self.table.delete_many(where=where), f"Failed to delete records in {self.table_name} with criteria {where}", + "delete_many", ) # Validate and return count count_val = getattr(result, "count", None) diff --git a/tux/database/controllers/levels.py b/tux/database/controllers/levels.py index 
87d39af72..6efd3d18e 100644 --- a/tux/database/controllers/levels.py +++ b/tux/database/controllers/levels.py @@ -21,6 +21,28 @@ def __init__(self) -> None: super().__init__("levels") self.guild_table: GuildActions[Guild] = db.client.guild + async def get_user_level_data(self, member_id: int, guild_id: int) -> Levels | None: + """ + Fetches all level-related data for a user in a single query. + + Parameters + ---------- + member_id : int + The ID of the member. + guild_id : int + The ID of the guild. + + Returns + ------- + Levels | None + The levels record for the user, or None if not found. + """ + try: + return await self.find_one(where={"member_id": member_id, "guild_id": guild_id}) + except Exception as e: + logger.error(f"Error querying level data for member_id: {member_id}, guild_id: {guild_id}: {e}") + return None + async def get_xp(self, member_id: int, guild_id: int) -> float: """Get the XP of a member in a guild. diff --git a/tux/utils/banner.py b/tux/utils/banner.py index 4cfe6c220..a0cc60126 100644 --- a/tux/utils/banner.py +++ b/tux/utils/banner.py @@ -1,6 +1,7 @@ """Banner creation and formatting utilities for Tux.""" from dataclasses import dataclass, field +from datetime import UTC, datetime from typing import NamedTuple from rich.console import Console @@ -72,13 +73,13 @@ def _create_banner_table(self) -> Table: mode_text = "Development" if self.config.dev_mode else "Production" info_data = [ - ("", ""), # Empty row to shift content down - ("Bot Name", f"{self.config.bot_name} (Tux)"), + ("Bot Name", f"{self.config.bot_name}"), ("Version", self.config.version), ("Bot ID", str(self.config.bot_id or "Unknown")), ("Status", f"Watching {self.config.guild_count} servers with {self.config.user_count} users"), ("Prefix", self.config.prefix), ("Mode", Text(mode_text, style=mode_style)), + ("Date", datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S")), ] # Add rows, combining ASCII art with info diff --git a/tux/utils/context_utils.py b/tux/utils/context_utils.py new file mode 100644 index 000000000..5620099d1 --- /dev/null +++ b/tux/utils/context_utils.py @@ -0,0 +1,108 @@ +""" +Command and Interaction Context Utilities. + +This module provides helper functions to abstract and normalize the process of +extracting contextual information from different types of command invocations +in `discord.py`. + +The primary goal is to create a single, consistent dictionary format for context +data, regardless of whether the command was triggered by a traditional prefix +command (`commands.Context`) or a slash command (`discord.Interaction`). +This standardized context is invaluable for logging, error reporting (e.g., to +Sentry), and any other system that needs to operate on command data without +worrying about the source type. +""" + +from __future__ import annotations + +from typing import Any + +from discord import Interaction +from discord.ext import commands + +# Type alias for a command context or an interaction. +ContextOrInteraction = commands.Context[Any] | Interaction + + +def _get_interaction_details(source: Interaction) -> dict[str, Any]: + """ + Extracts context details specifically from a discord.Interaction. + + Parameters + ---------- + source : Interaction + The interaction object from a slash command. + + Returns + ------- + dict[str, Any] + A dictionary containing interaction-specific context. 
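Back in the levels controller: `get_user_level_data` exists so call sites can collapse several per-field round trips into one lookup. A hedged sketch of the intended call-site change, assuming the controller is reachable as `db.levels` and that the `Levels` record exposes `xp` and `level` fields (both are assumptions based on the surrounding methods; only `get_xp` is shown in this hunk):

```python
# Before: one query per field.
xp = await db.levels.get_xp(member.id, guild.id)

# After: one query returns the whole record, or None for members with no row yet.
record = await db.levels.get_user_level_data(member.id, guild.id)
xp = record.xp if record else 0.0
level = record.level if record else 0
```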
+ """ + details: dict[str, Any] = { + "command_type": "slash", + "interaction_id": source.id, + "channel_id": source.channel_id, + "guild_id": source.guild_id, + } + if source.command: + details["command_name"] = source.command.qualified_name + return details + + +def _get_context_details(source: commands.Context[Any]) -> dict[str, Any]: + """ + Extracts context details specifically from a commands.Context. + + Parameters + ---------- + source : commands.Context[Any] + The context object from a prefix command. + + Returns + ------- + dict[str, Any] + A dictionary containing context-specific data. + """ + details: dict[str, Any] = { + "command_type": "prefix", + "message_id": source.message.id, + "channel_id": source.channel.id, + "guild_id": source.guild.id if source.guild else None, + } + if source.command: + details["command_name"] = source.command.qualified_name + details["command_prefix"] = source.prefix + details["command_invoked_with"] = source.invoked_with + return details + + +def get_interaction_context(source: ContextOrInteraction) -> dict[str, Any]: + """ + Builds a standardized dictionary of context from a command or interaction. + + This is the main public function of the module. It takes either a + `commands.Context` or a `discord.Interaction` and returns a dictionary + with a consistent set of keys, abstracting away the differences between + the two source types. + + Args: + source: The command `Context` or `Interaction` object. + + Returns: + A dictionary with standardized context keys like `user_id`, + `command_name`, `guild_id`, `command_type`, etc. + """ + user = source.user if isinstance(source, Interaction) else source.author + + # Base context is common to both types + context: dict[str, Any] = { + "user_id": user.id, + "user_name": str(user), + "is_interaction": isinstance(source, Interaction), + } + + # Delegate to helper functions for type-specific details + details = _get_interaction_details(source) if isinstance(source, Interaction) else _get_context_details(source) + context |= details + + return context diff --git a/tux/utils/emoji.py b/tux/utils/emoji_manager.py similarity index 100% rename from tux/utils/emoji.py rename to tux/utils/emoji_manager.py diff --git a/tux/utils/hot_reload.py b/tux/utils/hot_reload.py index 4a09670e7..299934393 100644 --- a/tux/utils/hot_reload.py +++ b/tux/utils/hot_reload.py @@ -27,7 +27,7 @@ from discord.ext import commands from loguru import logger -from tux.utils.sentry import span +from tux.utils.tracing import span # Type variables and protocols F = TypeVar("F", bound=Callable[..., Any]) diff --git a/tux/utils/protocols.py b/tux/utils/protocols.py new file mode 100644 index 000000000..203083fe9 --- /dev/null +++ b/tux/utils/protocols.py @@ -0,0 +1,32 @@ +""" +Defines structural type hints (Protocols) for dependency injection. + +This module contains Protocol classes that define the structure of objects +required by different parts of the application. By using these protocols +for type hinting instead of concrete classes (like `Tux`), we can achieve +loose coupling between components. + +This approach, known as structural subtyping or static duck typing, allows +any object that has the required attributes and methods to be used, +breaking circular import dependencies and making the codebase more modular +and easier to test. 
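For reviewers, the normalized shape produced by `get_interaction_context` looks like this; the IDs are made up, but the keys follow the helpers above exactly:

```python
# From a prefix command (commands.Context):
{
    "user_id": 1234, "user_name": "someuser", "is_interaction": False,
    "command_type": "prefix", "message_id": 5678, "channel_id": 910,
    "guild_id": 1112, "command_name": "ban",
    "command_prefix": "$", "command_invoked_with": "ban",
}

# From a slash command (discord.Interaction):
{
    "user_id": 1234, "user_name": "someuser", "is_interaction": True,
    "command_type": "slash", "interaction_id": 1314,
    "channel_id": 910, "guild_id": 1112, "command_name": "ban",
}
```

Downstream consumers (logging, Sentry scopes) can branch on `command_type` or `is_interaction` without ever touching the original object.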
+""" + +from __future__ import annotations + +from collections.abc import Mapping +from typing import TYPE_CHECKING, Protocol + +if TYPE_CHECKING: + from discord.ext import commands + + from tux.utils.sentry_manager import SentryManager + + +class BotProtocol(Protocol): + """A protocol for the bot instance to provide necessary attributes.""" + + @property + def cogs(self) -> Mapping[str, commands.Cog]: ... + + sentry_manager: SentryManager diff --git a/tux/utils/sentry_manager.py b/tux/utils/sentry_manager.py new file mode 100644 index 000000000..ca124fff3 --- /dev/null +++ b/tux/utils/sentry_manager.py @@ -0,0 +1,560 @@ +""" +Sentry Integration Manager. + +This module provides the `SentryManager` class, a centralized wrapper for all +interactions with the Sentry SDK. Its primary responsibilities include: + +- **Initialization**: Configuring and initializing the Sentry SDK with the + appropriate DSN, release version, and environment settings. +- **Graceful Shutdown**: Handling OS signals (SIGTERM, SIGINT) to ensure that + all pending Sentry events are flushed before the application exits. +- **Context Management**: Providing methods to enrich Sentry events with + contextual data, such as user information, command details, and custom tags. +- **Event Capturing**: Offering a simplified interface (`capture_exception`, + `capture_message`) for sending events to Sentry. +""" + +from __future__ import annotations + +from types import FrameType +from typing import Any, ClassVar, Literal, cast + +import discord +import sentry_sdk +from discord import Interaction +from discord.ext import commands +from loguru import logger +from sentry_sdk.integrations.asyncio import AsyncioIntegration +from sentry_sdk.integrations.loguru import LoguruIntegration +from sentry_sdk.types import Event, Hint + +from tux.utils.config import CONFIG +from tux.utils.context_utils import get_interaction_context +from tux.utils.env import get_current_env + +# Type alias for Sentry's log level strings. +LogLevelStr = Literal["fatal", "critical", "error", "warning", "info", "debug"] + +# Type alias for a command context or an interaction. +ContextOrInteraction = commands.Context[commands.Bot] | Interaction + +sentry_sdk.set_user(None) + + +class SentryManager: + """ + Handles all interactions with the Sentry SDK for the bot. + + This class acts as a singleton-like manager (though not strictly enforced) + for initializing Sentry, capturing events, and managing performance + monitoring transactions. + """ + + # Standard Sentry transaction statuses. + # See: https://develop.sentry.dev/sdk/event-payloads/transaction/#transaction-status + STATUS: ClassVar[dict[str, str]] = { + "OK": "ok", + "UNKNOWN": "unknown", + "ERROR": "internal_error", + "NOT_FOUND": "not_found", + "PERMISSION_DENIED": "permission_denied", + "INVALID_ARGUMENT": "invalid_argument", + "RESOURCE_EXHAUSTED": "resource_exhausted", + "UNAUTHENTICATED": "unauthenticated", + "CANCELLED": "cancelled", + } + + def __init__(self) -> None: + """Initialize the SentryManager.""" + self.active_sentry_transactions: dict[int, Any] = {} + + # --- Setup & Lifecycle --- + + @staticmethod + def _before_send(event: Event, hint: Hint) -> Event | None: + """ + Filter and sanitize events before sending to Sentry. 
+ + This hook allows us to: + - Remove sensitive information + - Filter out noisy errors + - Add error fingerprinting for better grouping + - Drop events we don't want to track + """ + # Filter out known noisy errors that provide little value + if "exc_info" in hint: + exc_type, exc_value, _ = hint["exc_info"] + + # Filter out network-related errors that are usually not actionable + if exc_type.__name__ in ("ConnectionResetError", "ConnectionAbortedError", "TimeoutError"): + return None + + # Add custom fingerprinting for Discord errors + if exc_type.__name__.startswith("Discord"): + event["fingerprint"] = [exc_type.__name__, str(getattr(exc_value, "code", "unknown"))] + + # Add fingerprinting for database errors + elif exc_type.__name__ in ("DatabaseError", "OperationalError", "IntegrityError"): + # Group database errors by type and first few words of message + error_msg = str(exc_value)[:50] if exc_value else "unknown" + event["fingerprint"] = ["database_error", exc_type.__name__, error_msg] + + # Add fingerprinting for command errors + elif exc_type.__name__.endswith("CommandError"): + command_name = event.get("tags", {}).get("command", "unknown") + event["fingerprint"] = ["command_error", exc_type.__name__, command_name] + + # Basic data sanitization - remove potentially sensitive info + # Remove sensitive data from request context if present + if "request" in event: + request = event["request"] + if "query_string" in request: + request["query_string"] = "[REDACTED]" + if "cookies" in request: + request["cookies"] = "[REDACTED]" + + return event + + @staticmethod + def _before_send_transaction(event: Event, hint: Hint) -> Event | None: + """ + Filter and modify transaction events before sending to Sentry. + + This helps reduce noise and improve transaction grouping. + """ + if event.get("type") == "transaction": + transaction_name = event.get("transaction", "") + + # Filter out noisy or uninteresting transactions entirely + if any( + op in transaction_name + for op in [ + "safe_get_attr", + "connect_or_create", + "_build_", + "_add_include", + "CogLoader.load_cogs_from_folder", # Startup noise + "CogLoader Setup", # More startup noise + "Bot shutdown process", # Shutdown noise + ] + ): + return None + + # Filter spans to reduce noise and group operations. + # This provides more meaningful and actionable traces. + if "spans" in event: + spans = cast(list[dict[str, Any]], event.get("spans") or []) + filtered_spans: list[dict[str, Any]] = [] + for span in spans: + op = span.get("op", "") + description = span.get("description", "") + + # Filter out internal Prisma HTTP requests to the query engine. + # These are implementation details and not useful for performance monitoring. + if op == "http.client" and "localhost" in description: + continue + + # Filter out noisy, low-level asyncio/library functions. + if "staggered_race" in description: + continue + + # Group database controller operations for cleaner reporting. + if "db.controller." 
in op: + if "get_" in op or "find_" in op: + span["op"] = "db.read" + elif "create_" in op: + span["op"] = "db.create" + elif "update_" in op or "increment_" in op: + span["op"] = "db.update" + elif "delete_" in op: + span["op"] = "db.delete" + elif "count_" in op: + span["op"] = "db.count" + else: + span["op"] = "db.other" + # Normalize description for grouped DB operations + span["description"] = f"DB {str(span['op']).split('.')[-1].capitalize()} Operation" + + filtered_spans.append(span) + event["spans"] = filtered_spans + + # Group all database controller transactions by type for cleaner reporting. + # This is a fallback for transactions that are purely DB operations. + if "db.controller." in transaction_name: + # Extract operation type and normalize + if "get_" in transaction_name or "find_" in transaction_name: + event["transaction"] = "db.controller.read_operation" + elif "create_" in transaction_name: + event["transaction"] = "db.controller.create_operation" + elif "update_" in transaction_name or "increment_" in transaction_name: + event["transaction"] = "db.controller.update_operation" + elif "delete_" in transaction_name: + event["transaction"] = "db.controller.delete_operation" + elif "count_" in transaction_name: + event["transaction"] = "db.controller.count_operation" + else: + event["transaction"] = "db.controller.other_operation" + + return event + + @staticmethod + def _traces_sampler(sampling_context: dict[str, Any]) -> float: + """ + Custom trace sampling function for more granular control over which traces to sample. + + Parameters + ---------- + sampling_context : dict[str, Any] + Context information about the transaction + + Returns + ------- + float + Sampling rate between 0.0 and 1.0 + """ + # Get transaction name for decision making + transaction_name = sampling_context.get("transaction_context", {}).get("name", "") + + # Very aggressive sampling in production to reduce noise + if get_current_env() not in ("dev", "development"): + # Almost no sampling for database operations (they're very frequent) + if "db.controller" in transaction_name: + return 0.01 # 1% sampling for DB operations + if "db.query" in transaction_name: + return 0.005 # 0.5% sampling for low-level DB queries + if "command" in transaction_name: + # Normal sampling for user commands (more important) + return 0.1 # 10% sampling for commands + if "cog." in transaction_name: + # Very low sampling for cog operations + return 0.02 # 2% sampling for cog ops + # Low sampling for other operations + return 0.05 # 5% sampling for other ops + + # Full sampling in development for debugging + return 1.0 + + @staticmethod + def setup() -> None: + """ + Initializes the Sentry SDK with configuration from the environment. + + If no Sentry DSN is provided in the configuration, setup is skipped. + This method configures the release version, environment, tracing, and + enables Sentry's logging integration. 
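A quick reference for how `_traces_sampler` resolves rates: in `dev`/`development` every branch returns 1.0, and outside of it the branches fall through in the order shown above. The transaction names here are illustrative:

```python
from tux.utils.sentry_manager import SentryManager

expected_production_rates = {
    "db.controller.read_operation": 0.01,   # DB controller ops: 1%
    "db.query: levels": 0.005,              # low-level DB queries: 0.5%
    "command ban": 0.1,                     # user commands: 10%
    "cog.load_path": 0.02,                  # cog operations: 2%
    "startup": 0.05,                        # everything else: 5%
}
for name, rate in expected_production_rates.items():
    ctx = {"transaction_context": {"name": name}}
    # Holds whenever get_current_env() reports a non-dev environment.
    assert SentryManager._traces_sampler(ctx) == rate
```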
+ """ + if not CONFIG.SENTRY_DSN: + logger.warning("No Sentry DSN configured, skipping Sentry setup") + return + + logger.info("Setting up Sentry...") + + try: + sentry_sdk.init( + # https://docs.sentry.io/platforms/python/configuration/options/#dsn + dsn=CONFIG.SENTRY_DSN, + # https://docs.sentry.io/platforms/python/configuration/options/#release + release=CONFIG.BOT_VERSION, + # https://docs.sentry.io/platforms/python/configuration/options/#environment + environment=get_current_env(), + integrations=[ + AsyncioIntegration(), + LoguruIntegration(), + ], + enable_tracing=True, + # https://docs.sentry.io/platforms/python/configuration/options/#attach_stacktrace + attach_stacktrace=True, + # https://docs.sentry.io/platforms/python/configuration/options/#send_default_pii + send_default_pii=False, + # https://docs.sentry.io/platforms/python/configuration/options/#traces_sample_rate + # Adjust sampling based on environment - 100% for dev, lower for production + traces_sample_rate=1.0 if get_current_env() in ("dev", "development") else 0.1, + # Set profiles_sample_rate to profile transactions. + # We recommend adjusting this value in production. + profiles_sample_rate=1.0 if get_current_env() in ("dev", "development") else 0.01, + # https://docs.sentry.io/platforms/python/configuration/filtering/#using-before-send + before_send=SentryManager._before_send, + before_send_transaction=SentryManager._before_send_transaction, + # Custom trace sampling function for more granular control + traces_sampler=SentryManager._traces_sampler, + _experiments={ + "enable_logs": True, + }, + ) + sentry_sdk.set_tag("discord_library_version", discord.__version__) + logger.info(f"Sentry initialized: {sentry_sdk.is_initialized()}") + except Exception as e: + logger.error(f"Failed to initialize Sentry: {e}") + + @staticmethod + def report_signal(signum: int, _frame: FrameType | None) -> None: + """ + A signal handler that reports termination signals to Sentry. + + This method is designed to be used with Python's `signal` module. + It captures signals like SIGTERM and SIGINT, adds context to Sentry, + and then raises a `KeyboardInterrupt` to trigger the bot's graceful + shutdown sequence. + + Parameters + ---------- + signum : int + The signal number received. + _frame : FrameType | None + The current stack frame at the time of the signal. + """ + if sentry_sdk.is_initialized(): + with sentry_sdk.push_scope() as scope: + scope.set_tag("signal.number", signum) + scope.set_tag("lifecycle.event", "termination_signal") + sentry_sdk.add_breadcrumb( + category="lifecycle", + message=f"Received termination signal {signum}", + level="info", + ) + raise KeyboardInterrupt + + @staticmethod + def flush() -> None: + """ + Flushes all pending Sentry events. + + This should be called during the application's shutdown sequence to + ensure that all buffered events are sent before the process exits. + """ + if sentry_sdk.is_initialized(): + sentry_sdk.flush() + + @property + def is_initialized(self) -> bool: + """ + A convenience property to check if the Sentry SDK is active. + + Returns + ------- + bool + True if Sentry is initialized, False otherwise. + """ + return sentry_sdk.is_initialized() + + # --- Event Capturing & Context --- + + def capture_exception( + self, + error: Exception, + *, + context: dict[str, Any] | None = None, + level: LogLevelStr = "error", + tags: dict[str, str] | None = None, + ) -> str | None: + """ + Captures and reports an exception to Sentry. 
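On the lifecycle side, `report_signal` is shaped to drop straight into Python's `signal` module: it matches the `(signum, frame)` handler signature and raises `KeyboardInterrupt` so the normal shutdown path runs after Sentry is tagged. A minimal wiring sketch of that registration at process startup:

```python
import signal

from tux.utils.sentry_manager import SentryManager

SentryManager.setup()
signal.signal(signal.SIGTERM, SentryManager.report_signal)
signal.signal(signal.SIGINT, SentryManager.report_signal)
# ... run the bot; call SentryManager.flush() during shutdown so buffered
# events are sent before the process exits.
```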
+ + This method enriches the exception report with additional context + and tags, providing more insight into the error. + + Parameters + ---------- + error : Exception + The exception object to capture. + context : dict[str, Any] | None, optional + A dictionary of context data to attach to the event. + level : LogLevelStr, optional + The severity level for the event (e.g., 'error', 'warning'). + tags : dict[str, str] | None, optional + Additional key-value tags to associate with the event. + + Returns + ------- + str | None + The Sentry event ID if capture was successful, otherwise None. + """ + if not self.is_initialized: + return None + + event_id: str | None = None + try: + with sentry_sdk.push_scope() as scope: + if context: + self._set_scope_context(scope, context) + + scope.level = level + + if tags: + for key, value in tags.items(): + scope.set_tag(key, value) + + event_id = sentry_sdk.capture_exception(error) + + if event_id: + logger.trace(f"Reported {type(error).__name__} to Sentry ({event_id})") + else: + logger.warning(f"Captured {type(error).__name__} but Sentry returned no ID.") + except Exception as e: + logger.error(f"Failed to report {type(error).__name__} to Sentry: {e}") + + return event_id + + def capture_message(self, message: str, level: LogLevelStr = "info") -> None: + """ + Captures and reports a message to Sentry. + + Parameters + ---------- + message : str + The message string to report. + level : LogLevelStr, optional + The severity level for the message. + """ + if self.is_initialized: + with sentry_sdk.push_scope() as scope: + scope.set_level(level) + sentry_sdk.capture_message(message) + logger.trace(f"Captured message in Sentry: {message}") + + def set_tag(self, key: str, value: Any) -> None: + """ + Sets a tag in the current Sentry scope. + + Tags are indexed key-value pairs that can be used for searching + and filtering events in Sentry. + + Parameters + ---------- + key : str + The name of the tag. + value : Any + The value of the tag. + """ + if self.is_initialized: + sentry_sdk.set_tag(key, value) + logger.trace(f"Set Sentry tag: {key}={value}") + + def set_context(self, key: str, value: dict[str, Any]) -> None: + """ + Sets context data in the current Sentry scope. + + Context provides additional, non-indexed data that is displayed + on the Sentry event page. + + Parameters + ---------- + key : str + The name of the context group (e.g., 'discord', 'user_info'). + value : dict[str, Any] + A dictionary of context data. + """ + if self.is_initialized: + sentry_sdk.set_context(key, value) + logger.trace(f"Set Sentry context for {key}.") + + # --- Transaction Management --- + + def finish_transaction_on_error(self) -> None: + """ + Finds and finishes an active Sentry transaction with an error status. + + This method should be called from an error handler. It automatically + accesses the current span and sets its status to 'internal_error'. + """ + if not self.is_initialized: + return + + if span := sentry_sdk.get_current_span(): + span.set_status(self.STATUS["ERROR"]) + logger.trace("Set Sentry span status to 'internal_error' for errored command.") + + # --- Internal Helpers --- + + def _set_scope_context(self, scope: Any, context: dict[str, Any]) -> None: + """ + Sets user, context, and tags on a Sentry scope from a context dictionary. + + Parameters + ---------- + scope : Any + The Sentry scope object to modify. + context : dict[str, Any] + A dictionary of context data. 
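Putting the capture helpers together: a typical error path marks the in-flight transaction as failed, then reports with context and tags. The handler shape below is illustrative; the keyword arguments match `capture_exception` exactly:

```python
async def handle_command_error(sentry: SentryManager, error: Exception) -> None:
    # Flag the active Sentry span/transaction as errored before reporting.
    sentry.finish_transaction_on_error()
    event_id = sentry.capture_exception(
        error,
        context={"user_id": 1234, "user_name": "someuser", "command_name": "ban"},
        level="error",
        tags={"subsystem": "moderation"},
    )
    if event_id:
        print(f"Reported to Sentry as {event_id}")
```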
+ """ + scope.set_user({"id": context.get("user_id"), "username": context.get("user_name")}) + scope.set_context("discord", context) + scope.set_tag("command_name", context.get("command_name", "Unknown")) + scope.set_tag("command_type", context.get("command_type", "Unknown")) + guild_id = context.get("guild_id") + scope.set_tag("guild_id", str(guild_id) if guild_id else "DM") + + def set_user_context(self, user: discord.User | discord.Member) -> None: + """ + Sets the user context for the current Sentry scope. + + This provides valuable information for debugging user-specific issues. + + Parameters + ---------- + user : discord.User | discord.Member + The Discord user or member to set as context. + """ + if not self.is_initialized: + return + + user_data: dict[str, Any] = { + "id": str(user.id), + "username": user.name, + "display_name": user.display_name, + "bot": user.bot, + "created_at": user.created_at.isoformat(), + } + + # Add member-specific data if available + if isinstance(user, discord.Member): + member_data = { + "guild_id": str(user.guild.id), + "guild_name": user.guild.name, + "nick": user.nick, + "joined_at": user.joined_at.isoformat() if user.joined_at else None, + "roles": [role.name for role in user.roles[1:]], # Exclude @everyone + "premium_since": user.premium_since.isoformat() if user.premium_since else None, + } + user_data |= member_data + + sentry_sdk.set_user(user_data) + logger.trace(f"Set Sentry user context for {user.name}") + + def set_command_context(self, ctx: ContextOrInteraction) -> None: + """ + Sets comprehensive command context for the current Sentry scope using existing context utilities. + + This enriches error reports with command-specific information. + + Parameters + ---------- + ctx : ContextOrInteraction + The command context or interaction. + """ + if not self.is_initialized: + return + + # Use existing context utilities to get standardized context data + context_data = get_interaction_context(ctx) + + # Set user context + user = ctx.user if isinstance(ctx, Interaction) else ctx.author + self.set_user_context(user) + + # Set guild context if available + if ctx.guild: + guild_data = { + "id": str(ctx.guild.id), + "name": ctx.guild.name, + "member_count": ctx.guild.member_count, + "created_at": ctx.guild.created_at.isoformat(), + "owner_id": str(ctx.guild.owner_id) if ctx.guild.owner_id else None, + "verification_level": ctx.guild.verification_level.name, + "premium_tier": ctx.guild.premium_tier, + "preferred_locale": str(ctx.guild.preferred_locale), + } + self.set_context("guild", guild_data) + + # Set command context using standardized data + self.set_context("command", context_data) diff --git a/tux/utils/task_manager.py b/tux/utils/task_manager.py new file mode 100644 index 000000000..e5d6c065f --- /dev/null +++ b/tux/utils/task_manager.py @@ -0,0 +1,685 @@ +""" +Asynchronous Task Management Utility. + +This module provides the `TaskManager` class, which encapsulates the logic for +monitoring, categorizing, and managing the lifecycle of asyncio tasks within the +bot. By abstracting this functionality, it keeps the main `Tux` class cleaner +and more focused on its core responsibilities. + +The manager is responsible for: +- Periodically monitoring all running asyncio tasks. +- Categorizing tasks based on their naming conventions (e.g., discord.py + internal tasks, scheduled tasks, command tasks). +- Gracefully stopping and cancelling tasks during the bot's shutdown sequence. +- Health monitoring and automatic recovery of critical tasks. 
+- Collecting performance metrics and statistics. +""" + +from __future__ import annotations + +import asyncio +import contextlib +import time +from collections import defaultdict, deque +from collections.abc import Callable, Coroutine +from dataclasses import dataclass, field +from enum import Enum, auto +from typing import Any, ClassVar, NamedTuple, cast + +from discord.ext import tasks +from loguru import logger + +from tux.utils.protocols import BotProtocol +from tux.utils.tracing import start_span, transaction + + +class TaskCategory(Enum): + """Categories for background tasks.""" + + SCHEDULED = auto() + GATEWAY = auto() + SYSTEM = auto() + COMMAND = auto() + UNKNOWN = auto() + + +class TaskPriority(Enum): + """Task priority levels for shutdown ordering.""" + + CRITICAL = auto() # Essential tasks (database, core services) + HIGH = auto() # Important tasks (moderation, reminders) + NORMAL = auto() # Regular tasks (levels, starboard) + LOW = auto() # Optional tasks (status, activities) + + +@dataclass +class TaskMetrics: + """Metrics tracking for individual tasks.""" + + name: str + category: TaskCategory + priority: TaskPriority = TaskPriority.NORMAL + start_time: float = field(default_factory=time.time) + restart_count: int = 0 + last_restart: float | None = None + total_runtime: float = 0.0 + avg_runtime: float = 0.0 + max_runtime: float = 0.0 + error_count: int = 0 + last_error: str | None = None + last_error_time: float | None = None + + +class TaskHealth(NamedTuple): + """Health status of a task.""" + + is_healthy: bool + uptime: float + error_rate: float + restart_count: int + last_seen: float + + +@dataclass +class CriticalTaskConfig: + """Configuration for critical tasks that should be monitored and restarted.""" + + name: str + cog_name: str + task_attr: str + priority: TaskPriority = TaskPriority.HIGH + max_restarts: int = 5 + restart_delay: float = 30.0 + health_check_interval: float = 300.0 # 5 minutes + + +class TaskManager: + """ + Enhanced task manager with health monitoring, metrics, and recovery capabilities. + + Manages the lifecycle of asyncio tasks for the bot with advanced features: + - Task registration and health monitoring + - Automatic recovery of failed critical tasks + - Performance metrics and statistics collection + """ + + # This mapping is used to categorize tasks based on the prefix of their names. + # It allows for easy identification of tasks from specific libraries or systems. + TASK_PREFIX_MAP: ClassVar[dict[tuple[str, ...], TaskCategory]] = { + ("discord-ext-tasks:",): TaskCategory.SCHEDULED, + ("discord.py:", "discord-voice-", "discord-gateway-"): TaskCategory.GATEWAY, + ("patch_asyncio",): TaskCategory.SYSTEM, + } + + # Default critical tasks that should be monitored + DEFAULT_CRITICAL_TASKS: ClassVar[list[CriticalTaskConfig]] = [ + CriticalTaskConfig("reminder_processor", "ReminderService", "reminder_processor", TaskPriority.CRITICAL), + CriticalTaskConfig("tempban_checker", "TempBan", "check_tempbans", TaskPriority.HIGH), + CriticalTaskConfig("afk_expiration_handler", "Afk", "handle_afk_expiration", TaskPriority.NORMAL), + CriticalTaskConfig("old_gif_remover", "GifLimiter", "old_gif_remover", TaskPriority.NORMAL), + CriticalTaskConfig("influx_guild_stats", "InfluxLogger", "_log_guild_stats", TaskPriority.LOW), + CriticalTaskConfig("influx_db_logger", "InfluxLogger", "logger", TaskPriority.LOW), + ] + + def __init__(self, bot: BotProtocol) -> None: + """ + Initialize the TaskManager with enhanced monitoring capabilities. 
+ + Parameters + ---------- + bot : BotProtocol + The bot instance that conforms to the protocol. + """ + self.bot = bot + + # Task registration and monitoring + self.critical_tasks: dict[str, CriticalTaskConfig] = {} + self.task_metrics: dict[str, TaskMetrics] = {} + self.task_history: dict[str, deque[float]] = defaultdict(lambda: deque(maxlen=100)) + + # Health monitoring + self.last_health_check: float = 0.0 + self.health_check_interval: float = 300.0 # 5 minutes + + # Register default critical tasks + for task_config in self.DEFAULT_CRITICAL_TASKS: + self.register_critical_task(task_config) + + def setup_task_instrumentation(self) -> None: + """ + Initializes instrumentation for all registered critical tasks. + + This method should be called after all cogs are loaded to ensure + that the task objects are available to be wrapped. + """ + logger.info("Setting up Sentry instrumentation for critical tasks...") + + for task_name, config in self.critical_tasks.items(): + if not (cog := self.bot.cogs.get(config.cog_name)): + logger.warning(f"Cog {config.cog_name} not found for task {task_name}. Skipping instrumentation.") + continue + + if not (task_loop := getattr(cog, config.task_attr, None)): + logger.warning( + f"Task loop {config.task_attr} not found in cog {config.cog_name}. Skipping instrumentation.", + ) + continue + + if isinstance(task_loop, tasks.Loop): + try: + # We are confident .coro exists and is a callable coroutine on a tasks.Loop instance. + # The type checker struggles with this dynamic attribute from the discord.py library. + original_coro = cast(Callable[..., Coroutine[Any, Any, None]], task_loop.coro) + decorated_loop = transaction(op="task.run", name=f"task.{task_name}")(original_coro) + task_loop.coro = decorated_loop + logger.debug(f"Instrumented task: {task_name}") + except AttributeError: + logger.warning(f"Could not find a 'coro' on task {task_name}. Skipping instrumentation.") + else: + logger.warning( + f"Attribute {config.task_attr} in {config.cog_name} is not a Loop. Skipping instrumentation.", + ) + + # --- Public Methods --- + + def start(self) -> None: + """Starts the background task monitoring loop if it's not already running.""" + if not self._monitor_tasks_loop.is_running(): + self._monitor_tasks_loop.start() + logger.debug("Task monitoring loop started.") + logger.debug("Enhanced task monitoring started.") + + def stop(self) -> None: + """Stops the background task monitoring loop.""" + if self._monitor_tasks_loop.is_running(): + self._monitor_tasks_loop.stop() + logger.debug("Enhanced task monitoring stopped.") + + def register_critical_task(self, config: CriticalTaskConfig) -> None: + """ + Register a critical task for health monitoring and recovery. + + Parameters + ---------- + config : CriticalTaskConfig + Configuration for the critical task. + """ + self.critical_tasks[config.name] = config + self.task_metrics[config.name] = TaskMetrics( + name=config.name, + category=TaskCategory.SCHEDULED, + priority=config.priority, + ) + logger.debug(f"Registered critical task: {config.name}") + + def get_task_health(self, task_name: str) -> TaskHealth | None: + """ + Get health status for a specific task. + + Parameters + ---------- + task_name : str + The name of the task to check. + + Returns + ------- + TaskHealth | None + Health status or None if task not found. 
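Registering an additional critical task beyond `DEFAULT_CRITICAL_TASKS` is a single call against `CriticalTaskConfig`; the cog and attribute names below are hypothetical, the fields are the real dataclass fields:

```python
task_manager.register_critical_task(
    CriticalTaskConfig(
        name="feed_poller",          # key used for metrics and health checks
        cog_name="RssFeeds",         # hypothetical cog holding the loop
        task_attr="poll_feeds",      # tasks.Loop attribute on that cog
        priority=TaskPriority.HIGH,
        max_restarts=3,
        restart_delay=60.0,
    ),
)
```

Once registered, the task is picked up by `setup_task_instrumentation`, the health checks, and the automatic restart logic without further wiring.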
+ """ + if (metrics := self.task_metrics.get(task_name)) is None: + return None + + current_time = time.time() + uptime = current_time - metrics.start_time + + # Calculate error rate (errors per hour) + error_rate = (metrics.error_count / max(uptime / 3600, 0.1)) if uptime > 0 else 0.0 + + # Task is healthy if it has low error rate and hasn't been restarting frequently + is_healthy = ( + error_rate < 10.0 # Less than 10 errors per hour + and metrics.restart_count < 3 # Less than 3 restarts + and (not metrics.last_restart or current_time - metrics.last_restart > 300) # No restart in last 5 minutes + ) + + return TaskHealth( + is_healthy=is_healthy, + uptime=uptime, + error_rate=error_rate, + restart_count=metrics.restart_count, + last_seen=current_time, + ) + + def get_task_statistics(self) -> dict[str, Any]: + """ + Get comprehensive task statistics. + + Returns + ------- + dict[str, Any] + Statistics about all monitored tasks. + """ + # Initialize counters + healthy_tasks = 0 + unhealthy_tasks = 0 + total_restarts = 0 + total_errors = 0 + categories: defaultdict[str, int] = defaultdict(int) + priorities: defaultdict[str, int] = defaultdict(int) + + for task_name, metrics in self.task_metrics.items(): + if health := self.get_task_health(task_name): + if health.is_healthy: + healthy_tasks += 1 + else: + unhealthy_tasks += 1 + + categories[metrics.category.name] += 1 + priorities[metrics.priority.name] += 1 + total_restarts += metrics.restart_count + total_errors += metrics.error_count + + return { + "total_tasks": len(self.task_metrics), + "critical_tasks": len(self.critical_tasks), + "healthy_tasks": healthy_tasks, + "unhealthy_tasks": unhealthy_tasks, + "categories": dict(categories), + "priorities": dict(priorities), + "total_restarts": total_restarts, + "total_errors": total_errors, + } + + async def restart_critical_task(self, task_name: str) -> bool: # noqa: PLR0911 + """ + Attempt to restart a critical task. + + Parameters + ---------- + task_name : str + The name of the task to restart. + + Returns + ------- + bool + True if restart was successful, False otherwise. 
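Operationally, the health and statistics APIs above combine like this; the thresholds in the comment echo the ones hard-coded in `get_task_health`:

```python
health = task_manager.get_task_health("reminder_processor")
if health and not health.is_healthy:
    # Unhealthy means >=10 errors/hour, >=3 restarts, or a restart <5 min ago.
    print(f"degraded: {health.error_rate:.1f} err/h, {health.restart_count} restarts")

stats = task_manager.get_task_statistics()
print(f"{stats['healthy_tasks']}/{stats['total_tasks']} tasks healthy, "
      f"{stats['total_restarts']} restarts total")
```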
+ """ + # Validate task is critical and get config/metrics + if task_name not in self.critical_tasks: + logger.warning(f"Cannot restart non-critical task: {task_name}") + return False + + config = self.critical_tasks[task_name] + metrics = self.task_metrics[task_name] + current_time = time.time() + + # Check restart constraints + if metrics.restart_count >= config.max_restarts: + logger.error(f"Task {task_name} has exceeded max restarts ({config.max_restarts})") + return False + + if metrics.last_restart and current_time - metrics.last_restart < config.restart_delay: + logger.warning(f"Task {task_name} is in restart cooldown") + return False + + # Find and validate the cog and task + cog = self.bot.cogs.get(config.cog_name) + if not cog: + logger.error(f"Cog {config.cog_name} not found for task {task_name}") + return False + + task_loop = getattr(cog, config.task_attr, None) + if not isinstance(task_loop, tasks.Loop): + logger.error(f"Task {config.task_attr} not found in cog {config.cog_name}") + return False + + # Attempt restart + try: + if task_loop.is_running(): + task_loop.restart() + else: + task_loop.start() + except Exception as e: + logger.error(f"Failed to restart task {task_name}: {e}") + self.bot.sentry_manager.capture_exception(e) + return False + else: + # Update metrics on successful restart + metrics.restart_count += 1 + metrics.last_restart = current_time + metrics.start_time = current_time + + logger.info(f"Successfully restarted critical task: {task_name}") + return True + + async def cancel_all_tasks(self) -> None: + """ + Gracefully cancels all managed asyncio tasks with priority ordering. + + This is the main entrypoint for the shutdown process. It stops all + `discord.ext.tasks` loops and then proceeds to cancel all other + categorized tasks in priority order. + """ + with start_span("bot.cleanup_tasks", "Cleaning up running tasks"): + try: + await self._stop_task_loops() + + all_tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()] + tasks_by_type = self._categorize_tasks(all_tasks) + + # Cancel tasks in priority order (low priority first) + await self._cancel_tasks_by_priority(tasks_by_type) + + except Exception as e: + logger.error(f"Error during task cleanup: {e}") + self.bot.sentry_manager.capture_exception(e) + + # --- Monitoring Loop --- + + @tasks.loop(seconds=60) + async def _monitor_tasks_loop(self) -> None: + """ + Enhanced task monitoring with health checks and metrics collection. + + This loop runs every 60 seconds to gather all tasks, categorize them, + handle finished tasks, perform health checks, and collect metrics. + + Raises + ------ + RuntimeError + If a critical, unhandled exception occurs during task monitoring. 
+ """ + with start_span("bot.monitor_tasks", "Monitoring async tasks"): + try: + current_time = time.time() + all_tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()] + tasks_by_type = self._categorize_tasks(all_tasks) + + except Exception as e: + logger.error(f"Error during task categorization: {e}") + self.bot.sentry_manager.capture_exception(e) + return + + try: + await self._process_finished_tasks(tasks_by_type) + except Exception as e: + logger.error(f"Error processing finished tasks: {e}") + self.bot.sentry_manager.capture_exception(e) + + try: + self._update_task_metrics(tasks_by_type, current_time) + except Exception as e: + logger.error(f"Error updating task metrics: {e}") + self.bot.sentry_manager.capture_exception(e) + + try: + if current_time - self.last_health_check > self.health_check_interval: + await self._perform_health_checks() + self.last_health_check = current_time + except Exception as e: + logger.error(f"Error performing health checks: {e}") + self.bot.sentry_manager.capture_exception(e) + + # --- Task Categorization & Processing --- + + def _categorize_tasks(self, tasks: list[asyncio.Task[Any]]) -> dict[TaskCategory, list[asyncio.Task[Any]]]: + """ + Categorizes a list of tasks based on their names. + + Parameters + ---------- + tasks : list[asyncio.Task[Any]] + The list of asyncio tasks to categorize. + + Returns + ------- + dict[TaskCategory, list[asyncio.Task[Any]]] + A dictionary mapping each task category to a list of tasks. + """ + tasks_by_type: dict[TaskCategory, list[asyncio.Task[Any]]] = {category: [] for category in TaskCategory} + + for task in tasks: + if task.done(): + continue + + name = self._get_task_name(task) + category = self._get_task_category(name) + tasks_by_type.setdefault(category, []).append(task) + + if unknown_tasks := tasks_by_type.get(TaskCategory.UNKNOWN): + task_names = [self._get_task_name(t) for t in unknown_tasks] + logger.warning(f"Found {len(unknown_tasks)} uncategorized tasks: {', '.join(task_names)}") + + return tasks_by_type + + def _get_task_category(self, name: str) -> TaskCategory: + """ + Determines the category of a task from its name. + + It first checks against the `TASK_PREFIX_MAP` for known system/library + tasks, then checks for command-related tasks, and finally defaults + to a general system task. + + Parameters + ---------- + name : str + The name of the asyncio task. + + Returns + ------- + TaskCategory + The determined category for the task. + """ + if name in self.critical_tasks: + return TaskCategory.SCHEDULED + + # Default asyncio tasks (e.g., Task-1) are considered SYSTEM tasks. + if name.startswith("Task-"): + return TaskCategory.SYSTEM + + return next( + ( + category + for prefixes, category in self.TASK_PREFIX_MAP.items() + if any(name.startswith(p) for p in prefixes) + ), + (TaskCategory.COMMAND if "command_" in name.lower() else TaskCategory.UNKNOWN), + ) + + async def _process_finished_tasks(self, tasks_by_type: dict[TaskCategory, list[asyncio.Task[Any]]]) -> None: + """ + Awaits any tasks that have already completed to handle their results. + + This is important for preventing "awaitable was never awaited" warnings + and ensuring that exceptions from completed tasks are raised and logged. + + Parameters + ---------- + tasks_by_type : dict[TaskCategory, list[asyncio.Task[Any]]] + A dictionary of tasks, categorized by type. 
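A few concrete inputs and the category `_get_task_category` assigns them, per the prefix map, the critical-task lookup, and the fallbacks above (`tm` is a `TaskManager` instance):

```python
assert tm._get_task_category("reminder_processor") is TaskCategory.SCHEDULED  # registered critical task
assert tm._get_task_category("discord-ext-tasks: loop") is TaskCategory.SCHEDULED
assert tm._get_task_category("discord.py: on_message") is TaskCategory.GATEWAY
assert tm._get_task_category("Task-42") is TaskCategory.SYSTEM  # default asyncio name
assert tm._get_task_category("command_ban_worker") is TaskCategory.COMMAND
assert tm._get_task_category("mystery_worker") is TaskCategory.UNKNOWN
```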
+ """ + for task_list in tasks_by_type.values(): + for task in task_list: + if task.done(): + with contextlib.suppress(asyncio.CancelledError): + try: + await task + except Exception as e: + # Log task exceptions and update metrics + logger.error(f"Task {(task_name := self._get_task_name(task))} failed with exception: {e}") + self._record_task_error(task_name, str(e)) + + def _update_task_metrics( + self, + tasks_by_type: dict[TaskCategory, list[asyncio.Task[Any]]], + current_time: float, + ) -> None: + """ + Update metrics for all running tasks. + + Parameters + ---------- + tasks_by_type : dict[TaskCategory, list[asyncio.Task[Any]]] + Categorized tasks. + current_time : float + Current timestamp. + """ + # Update runtime metrics for critical tasks + for task_name, config in self.critical_tasks.items(): + if cog := self.bot.cogs.get(config.cog_name): + task_loop = getattr(cog, config.task_attr, None) + if isinstance(task_loop, tasks.Loop) and task_loop.is_running(): + metrics = self.task_metrics[task_name] + metrics.total_runtime = current_time - metrics.start_time + self.task_history[task_name].append(current_time) + + async def _perform_health_checks(self) -> None: + """Perform health checks on all critical tasks.""" + unhealthy_tasks: list[str] = [] + + for task_name, config in self.critical_tasks.items(): + cog = self.bot.cogs.get(config.cog_name) + if not cog: + logger.warning(f"Cog {config.cog_name} not found for critical task {task_name}") + continue + + task_loop = getattr(cog, config.task_attr, None) + if not isinstance(task_loop, tasks.Loop): + logger.warning(f"Task {config.task_attr} not found in cog {config.cog_name}") + continue + + # Check if task is running + if not task_loop.is_running(): + logger.warning(f"Critical task {task_name} is not running") + unhealthy_tasks.append(task_name) + continue + + # Check task health + health = self.get_task_health(task_name) + if health and not health.is_healthy: + logger.warning(f"Critical task {task_name} is unhealthy: {health}") + unhealthy_tasks.append(task_name) + + # Attempt to restart unhealthy critical tasks + for task_name in unhealthy_tasks: + if await self.restart_critical_task(task_name): + logger.info(f"Successfully recovered unhealthy task: {task_name}") + + def _record_task_error(self, task_name: str, error_msg: str) -> None: + """ + Record an error for a task. + + Parameters + ---------- + task_name : str + The name of the task. + error_msg : str + The error message. + """ + if task_name in self.task_metrics: + metrics = self.task_metrics[task_name] + metrics.error_count += 1 + metrics.last_error = error_msg + metrics.last_error_time = time.time() + + # --- Shutdown & Cleanup --- + + async def _stop_task_loops(self) -> None: + """ + Stops all registered `discord.ext.tasks.Loop` instances in all cogs. + + This is a critical first step in the cleanup process to prevent new + tasks from being created while shutdown is in progress. 
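+
+        Examples
+        --------
+        Invoked by ``cancel_all_tasks`` as the first step of shutdown::
+
+            await self._stop_task_loops()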
+ """ + with start_span("bot.stop_task_loops", "Stopping task loops"): + for cog_name, cog in self.bot.cogs.items(): + if not cog: + continue + + for name, value in cog.__dict__.items(): + if isinstance(value, tasks.Loop): + try: + value.stop() + logger.debug(f"Stopped task loop {cog_name}.{name}") + except Exception as e: + logger.error(f"Error stopping task loop {cog_name}.{name}: {e}") + + # Only stop the monitor loop if all cog tasks were processed without critical errors + if self._monitor_tasks_loop.is_running(): + self._monitor_tasks_loop.stop() + + @staticmethod + def _get_task_name(task: asyncio.Task[Any]) -> str: + """ + Gets a descriptive name for an asyncio task. + + If a task was not explicitly named, it attempts to derive a name + from its coroutine object for better logging. + + Parameters + ---------- + task : asyncio.Task[Any] + The asyncio task to get the name from. + + Returns + ------- + str + A descriptive name for the task. + """ + name = task.get_name() or "unnamed" + if name in ("None", "unnamed"): + coro = task.get_coro() + name = getattr(coro, "__qualname__", str(coro)) + return name + + async def _cancel_tasks_by_priority(self, tasks_by_type: dict[TaskCategory, list[asyncio.Task[Any]]]) -> None: + """ + Cancel tasks in priority order (low priority first). + + Parameters + ---------- + tasks_by_type : dict[TaskCategory, list[asyncio.Task[Any]]] + The dictionary of tasks to be cancelled. + """ + # Define shutdown priority order (low priority first) + shutdown_order = [ + TaskCategory.UNKNOWN, + TaskCategory.COMMAND, + TaskCategory.SYSTEM, + TaskCategory.SCHEDULED, + TaskCategory.GATEWAY, + ] + + with start_span("bot.cancel_tasks", "Cancelling tasks by priority") as span: + for category in shutdown_order: + task_list = tasks_by_type.get(category, []) + if not task_list: + continue + + task_names = [self._get_task_name(t) for t in task_list] + names = ", ".join(task_names) + + logger.debug(f"Cancelling {len(task_list)} {category.name}: {names}") + span.set_data(f"tasks.{category.name.lower()}", task_names) + + for task in task_list: + task.cancel() + + results = await asyncio.gather(*task_list, return_exceptions=True) + + for result in results: + if isinstance(result, Exception) and not isinstance(result, asyncio.CancelledError): + logger.error(f"Exception during task cancellation for {category.name}: {result!r}") + + logger.debug(f"Cancelled {category.name}") + + async def _cancel_tasks(self, tasks_by_type: dict[TaskCategory, list[asyncio.Task[Any]]]) -> None: + """ + Legacy method - redirects to priority-based cancellation. + + Parameters + ---------- + tasks_by_type : dict[TaskCategory, list[asyncio.Task[Any]]] + The dictionary of tasks to be cancelled. + """ + await self._cancel_tasks_by_priority(tasks_by_type) diff --git a/tux/utils/tracing.py b/tux/utils/tracing.py new file mode 100644 index 000000000..4233a6b88 --- /dev/null +++ b/tux/utils/tracing.py @@ -0,0 +1,518 @@ +""" +Sentry Instrumentation Utilities for Tracing and Performance Monitoring. + +This module provides a set of decorators and context managers to simplify the +instrumentation of code with Sentry transactions and spans. It standardizes the +creation of performance monitoring traces and ensures that they gracefully handle +cases where the Sentry SDK is not initialized by providing dummy objects. + +The main components are: +- Decorators (`@transaction`, `@span`): For easily wrapping entire functions or + methods in a Sentry transaction or span. 
+- Context Managers (`start_transaction`, `start_span`): For instrumenting + specific blocks of code within a function. +- Helper Functions: For adding contextual data to the currently active span. +""" + +import asyncio +import functools +import time +import traceback +from collections.abc import Callable, Coroutine, Generator +from contextlib import contextmanager +from typing import Any, ParamSpec, TypeVar, cast + +import sentry_sdk +from discord.ext import commands +from loguru import logger + +# Type variables for better type hints with generic functions +P = ParamSpec("P") +T = TypeVar("T") +R = TypeVar("R") + + +# --- Dummy Objects for Graceful Failure --- + + +class DummySpan: + """ + A no-op (dummy) span object for when the Sentry SDK is not initialized. + + This class mimics the interface of a Sentry span but performs no actions, + allowing instrumentation code (`with start_span(...)`) to run without errors + even if Sentry is disabled. + """ + + def __init__(self) -> None: + """Initialize the dummy span.""" + self.start_time = time.perf_counter() + + def set_tag(self, *args: Any, **kwargs: Any) -> "DummySpan": + """No-op tag setter.""" + return self + + def set_data(self, *args: Any, **kwargs: Any) -> "DummySpan": + """No-op data setter.""" + return self + + def set_status(self, *args: Any, **kwargs: Any) -> "DummySpan": + """No-op status setter.""" + return self + + def set_name(self, name: str) -> "DummySpan": + """No-op name setter.""" + return self + + +class DummyTransaction(DummySpan): + """ + A no-op (dummy) transaction object for when Sentry is not initialized. + + This inherits from `DummySpan` and provides a safe fallback for the + `start_transaction` context manager. + """ + + +# --- Common Helpers --- + + +def safe_set_name(obj: Any, name: str) -> None: + """ + Safely set the name on a span or transaction object. + + This helper is used because the `set_name` method may not always be + present on all span-like objects from Sentry, so this avoids + potential `AttributeError` exceptions. + + Parameters + ---------- + obj : Any + The span or transaction object. + name : str + The name to set. + """ + if hasattr(obj, "set_name"): + # Use getattr to avoid static type checking issues + set_name_func = obj.set_name + set_name_func(name) + + +def _handle_exception_in_sentry_context(context_obj: Any, exception: Exception) -> None: + """ + Handle exceptions in a Sentry context (span or transaction) with consistent patterns. + + Parameters + ---------- + context_obj : Any + The Sentry span or transaction object. + exception : Exception + The exception that occurred. + """ + context_obj.set_status("internal_error") + context_obj.set_data("error", str(exception)) + context_obj.set_data("traceback", traceback.format_exc()) + + +def _finalize_sentry_context(context_obj: Any, start_time: float) -> None: + """ + Finalize a Sentry context with timing information. + + Parameters + ---------- + context_obj : Any + The Sentry span or transaction object. + start_time : float + The start time for duration calculation. + """ + context_obj.set_data("duration_ms", (time.perf_counter() - start_time) * 1000) + + +def create_instrumentation_wrapper[**P, R]( + func: Callable[P, R], + context_factory: Callable[[], Any], + is_transaction: bool = False, +) -> Callable[P, R]: + """ + Creates an instrumentation wrapper for both sync and async functions. + + This is the core helper that eliminates duplication between transaction + and span decorators by providing a unified wrapper creation mechanism. 
+ + Parameters + ---------- + func : Callable[P, R] + The function to wrap. + context_factory : Callable[[], Any] + A factory function that creates the Sentry context (span or transaction). + is_transaction : bool, optional + Whether this is a transaction (affects status setting behavior). + + Returns + ------- + Callable[P, R] + The wrapped function. + """ + if asyncio.iscoroutinefunction(func): + + @functools.wraps(func) + async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + start_time = time.perf_counter() + + if not sentry_sdk.is_initialized(): + return await func(*args, **kwargs) + + with context_factory() as context_obj: + try: + # Set name for spans (transactions handle this themselves) + if not is_transaction: + safe_set_name(context_obj, func.__qualname__) + + result = await func(*args, **kwargs) + except Exception as e: + _handle_exception_in_sentry_context(context_obj, e) + raise + else: + context_obj.set_status("ok") + return result + finally: + _finalize_sentry_context(context_obj, start_time) + + return cast(Callable[P, R], async_wrapper) + + @functools.wraps(func) + def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + start_time = time.perf_counter() + + if not sentry_sdk.is_initialized(): + return func(*args, **kwargs) + + with context_factory() as context_obj: + try: + # Set name for spans (transactions handle this themselves) + if not is_transaction: + safe_set_name(context_obj, func.__qualname__) + + result = func(*args, **kwargs) + except Exception as e: + _handle_exception_in_sentry_context(context_obj, e) + raise + else: + context_obj.set_status("ok") + return result + finally: + _finalize_sentry_context(context_obj, start_time) + + return sync_wrapper + + +# --- Decorators --- + + +def transaction( + op: str, + name: str | None = None, + description: str | None = None, +) -> Callable[[Callable[P, R]], Callable[P, R]]: + """ + Decorator to wrap a function with a Sentry transaction. + + This handles both synchronous and asynchronous functions automatically. + It captures the function's execution time, sets the status to 'ok' on + success or 'internal_error' on failure, and records exceptions. + + Parameters + ---------- + op : str + The operation name for the transaction (e.g., 'db.query'). + name : Optional[str] + The name for the transaction. Defaults to the function's qualified name. + description : Optional[str] + A description of what the transaction is doing. + + Returns + ------- + Callable + The decorated function. + """ + + def decorator(func: Callable[P, R]) -> Callable[P, R]: + transaction_name = name or f"{func.__module__}.{func.__qualname__}" + transaction_description = description or f"Executing {func.__qualname__}" + + def context_factory() -> Any: + return sentry_sdk.start_transaction( + op=op, + name=transaction_name, + description=transaction_description, + ) + + return create_instrumentation_wrapper(func, context_factory, is_transaction=True) + + return decorator + + +def span(op: str, description: str | None = None) -> Callable[[Callable[P, R]], Callable[P, R]]: + """ + Decorator to wrap a function with a Sentry span. + + This should be used on functions called within an existing transaction. + It automatically handles both sync and async functions, captures execution + time, and records success or failure status. + + Parameters + ---------- + op : str + The operation name for the span (e.g., 'db.query.fetch'). + description : Optional[str] + A description of what the span is doing. Defaults to the function's name. 
+ + Returns + ------- + Callable + The decorated function. + """ + + def decorator(func: Callable[P, R]) -> Callable[P, R]: + span_description = description or f"Executing {func.__qualname__}" + + def context_factory() -> Any: + return sentry_sdk.start_span(op=op, description=span_description) + + return create_instrumentation_wrapper(func, context_factory, is_transaction=False) + + return decorator + + +# --- Context Managers --- + + +@contextmanager +def start_span(op: str, name: str = "") -> Generator[DummySpan | Any]: + """ + Context manager for creating a Sentry span for a block of code. + + Example: + with start_span("db.query", "Fetching user data"): + ... + + Parameters + ---------- + op : str + The operation name for the span. + name : str + The name of the span. + + Yields + ------ + Union[DummySpan, sentry_sdk.Span] + The Sentry span object or a dummy object if Sentry is not initialized. + """ + start_time = time.perf_counter() + + if not sentry_sdk.is_initialized(): + # Create a dummy context if Sentry is not available + dummy = DummySpan() + try: + yield dummy + finally: + pass + else: + with sentry_sdk.start_span(op=op, name=name) as span: + try: + yield span + finally: + span.set_data("duration_ms", (time.perf_counter() - start_time) * 1000) + + +@contextmanager +def start_transaction(op: str, name: str, description: str = "") -> Generator[DummyTransaction | Any]: + """ + Context manager for creating a Sentry transaction for a block of code. + + Example: + with start_transaction("task", "process_daily_report"): + ... + + Parameters + ---------- + op : str + The operation name for the transaction. + name : str + The name for the transaction. + description : str + A description of what the transaction is doing. + + Yields + ------ + Union[DummyTransaction, sentry_sdk.Transaction] + The Sentry transaction object or a dummy object if Sentry is not initialized. + """ + start_time = time.perf_counter() + + if not sentry_sdk.is_initialized(): + # Create a dummy context if Sentry is not available + dummy = DummyTransaction() + try: + yield dummy + finally: + pass + else: + with sentry_sdk.start_transaction(op=op, name=name, description=description) as transaction: + try: + yield transaction + finally: + transaction.set_data("duration_ms", (time.perf_counter() - start_time) * 1000) + + +# --- Enhanced Helper Functions --- + + +def add_tag_to_current_span(key: str, value: Any) -> None: + """ + Add a tag to the current active Sentry span, if it exists. + + This is a convenience function to avoid checking for an active span + everywhere in the code. + + Parameters + ---------- + key : str + The key of the tag. + value : Any + The value of the tag. + """ + if sentry_sdk.is_initialized() and (span := sentry_sdk.get_current_span()): + span.set_tag(key, value) + + +def add_data_to_current_span(key: str, value: Any) -> None: + """ + Add data to the current active Sentry span, if it exists. + + This is a convenience function to attach arbitrary, non-indexed data + to a span for additional context during debugging. + + Parameters + ---------- + key : str + The key of the data. + value : Any + The value of the data. + """ + if sentry_sdk.is_initialized() and (span := sentry_sdk.get_current_span()): + span.set_data(key, value) + + +def set_span_attributes(attributes: dict[str, Any]) -> None: + """ + Set multiple tags and data attributes on the current active Sentry span. + + This helper function simplifies attaching context to a span by accepting a + dictionary of attributes. 
Keys are automatically treated as tags. + + Parameters + ---------- + attributes : dict[str, Any] + A dictionary where keys are the attribute names and values are the + attribute values to set on the span. + """ + if sentry_sdk.is_initialized() and (span := sentry_sdk.get_current_span()): + for key, value in attributes.items(): + span.set_tag(key, value) + + +def capture_span_exception(exception: Exception, **extra_data: Any) -> None: + """ + Capture an exception in the current span with consistent error handling. + + This consolidates the common pattern of setting span status and data + when an exception occurs. + + Parameters + ---------- + exception : Exception + The exception to capture. + **extra_data : Any + Additional data to attach to the span. + """ + if sentry_sdk.is_initialized() and (span := sentry_sdk.get_current_span()): + _handle_exception_in_sentry_context(span, exception) + + # Add any additional data + for key, value in extra_data.items(): + span.set_data(f"extra.{key}", value) + + +@contextmanager +def enhanced_span(op: str, name: str = "", **initial_data: Any) -> Generator[DummySpan | Any]: + """ + Enhanced context manager for creating a Sentry span with initial data. + + This extends the basic start_span with the ability to set initial + tags and data, reducing boilerplate in calling code. + + Parameters + ---------- + op : str + The operation name for the span. + name : str + The name for the span. + **initial_data : Any + Initial data to set on the span. + + Yields + ------ + Union[DummySpan, sentry_sdk.Span] + The Sentry span object or a dummy object if Sentry is not initialized. + """ + # Skip spans for very short utility operations in production + if not sentry_sdk.is_initialized(): + yield DummySpan() + return + + # In production, skip tracing for certain frequent operations + env = initial_data.get("environment", "development") + if env not in ("dev", "development"): + if any(skip_term in name.lower() for skip_term in ["safe_get_attr", "connect_or_create"]): + yield DummySpan() + return + + with start_span(op, name) as span: + # Set initial data if provided + if initial_data: + for key, value in initial_data.items(): + span.set_tag(key, value) + + try: + yield span + except Exception as e: + capture_span_exception(e) + raise + + +def instrument_bot_commands(bot: commands.Bot) -> None: + """ + Automatically instruments all bot commands with Sentry transactions. + + This function iterates through all registered commands on the bot and + wraps their callbacks with the `@transaction` decorator. This ensures + that every command invocation is captured as a Sentry transaction. + + Parameters + ---------- + bot : commands.Bot + The instance of the bot whose commands should be instrumented. 
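+
+    Examples
+    --------
+    A minimal sketch, assuming this is called once after all cogs are loaded::
+
+        instrument_bot_commands(bot)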
+ """ + for command in bot.walk_commands(): + # The operation for commands is standardized as `command.run` + op = "command.run" + + # The transaction name is the full command name (e.g., "snippet get") + transaction_name = f"command.{command.qualified_name}" + + # Apply the transaction decorator to the command's callback + original_callback = cast(Callable[..., Coroutine[Any, Any, None]], command.callback) + command.callback = transaction(op=op, name=transaction_name)(original_callback) + + logger.info(f"Instrumented {len(list(bot.walk_commands()))} commands with Sentry.") From 155bb58c9ba208cc7ca8705e2dece4355f566deb Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 00:22:06 -0400 Subject: [PATCH 02/34] refactor: modernize error handling and sentry integration - Replace old sentry.py with new sentry_manager.py - Update error handlers with improved context - Streamline sentry error reporting --- tux/handlers/error.py | 379 ++++++++++++----------------------------- tux/handlers/sentry.py | 186 +++----------------- tux/utils/sentry.py | 291 ------------------------------- 3 files changed, 134 insertions(+), 722 deletions(-) delete mode 100644 tux/utils/sentry.py diff --git a/tux/handlers/error.py b/tux/handlers/error.py index 93e94e992..09569b425 100644 --- a/tux/handlers/error.py +++ b/tux/handlers/error.py @@ -12,17 +12,17 @@ import traceback from collections.abc import Callable, Coroutine from dataclasses import dataclass -from typing import Any +from typing import Any, cast import discord import Levenshtein -import sentry_sdk from discord import app_commands from discord.ext import commands from loguru import logger from tux.bot import Tux from tux.ui.embeds import EmbedCreator +from tux.utils.context_utils import get_interaction_context from tux.utils.exceptions import ( AppCommandPermissionLevelError, CodeExecutionError, @@ -32,6 +32,7 @@ PermissionLevelError, UnsupportedLanguageError, ) +from tux.utils.sentry_manager import LogLevelStr, SentryManager # --- Constants and Configuration --- @@ -75,15 +76,6 @@ # Note: Interaction is parameterized with the Bot type (Tux). AppCommandErrorHandler = Callable[[discord.Interaction[Tux], app_commands.AppCommandError], Coroutine[Any, Any, None]] -# --- Sentry Status Constants (copied from sentry.py for local use) --- -SENTRY_STATUS_OK = "ok" -SENTRY_STATUS_UNKNOWN = "unknown" -SENTRY_STATUS_INTERNAL_ERROR = "internal_error" -SENTRY_STATUS_NOT_FOUND = "not_found" -SENTRY_STATUS_PERMISSION_DENIED = "permission_denied" -SENTRY_STATUS_INVALID_ARGUMENT = "invalid_argument" -SENTRY_STATUS_RESOURCE_EXHAUSTED = "resource_exhausted" - # --- Error Handler Configuration --- @@ -101,6 +93,9 @@ class ErrorHandlerConfig: # Default log level for this error type (e.g., "INFO", "WARNING", "ERROR"). log_level: str = "INFO" + # Sentry transaction status for this error. If None, it's considered an internal error. + sentry_status: str | None = SentryManager.STATUS["ERROR"] + # Whether to send this specific error type to Sentry when handled. # Useful for tracking frequency even if the user sees a friendly message. send_to_sentry: bool = True @@ -222,6 +217,7 @@ def _extract_missing_argument_details(error: Exception) -> dict[str, Any]: app_commands.AppCommandError: ErrorHandlerConfig( message_format="An application command error occurred: {error}", log_level="WARNING", + sentry_status=SentryManager.STATUS["UNKNOWN"], ), # CommandInvokeError wraps the actual exception raised within an app command. 
# It will be unwrapped in _handle_error, but this provides a fallback config. @@ -229,41 +225,49 @@ def _extract_missing_argument_details(error: Exception) -> dict[str, Any]: message_format="An internal error occurred while running the command.", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), app_commands.TransformerError: ErrorHandlerConfig( message_format="Failed to process an argument value: {error}", log_level="INFO", send_to_sentry=False, + sentry_status=SentryManager.STATUS["INVALID_ARGUMENT"], ), app_commands.MissingRole: ErrorHandlerConfig( message_format="You need the role {roles} to use this command.", detail_extractor=_extract_missing_role_details, send_to_sentry=False, + sentry_status=SentryManager.STATUS["PERMISSION_DENIED"], ), app_commands.MissingAnyRole: ErrorHandlerConfig( message_format="You need one of the following roles: {roles}", detail_extractor=_extract_missing_any_role_details, send_to_sentry=False, + sentry_status=SentryManager.STATUS["PERMISSION_DENIED"], ), app_commands.MissingPermissions: ErrorHandlerConfig( message_format="You lack the required permission(s): {permissions}", detail_extractor=_extract_permissions_details, send_to_sentry=False, + sentry_status=SentryManager.STATUS["PERMISSION_DENIED"], ), # Generic check failure for app commands. app_commands.CheckFailure: ErrorHandlerConfig( message_format="You do not meet the requirements to run this command.", send_to_sentry=False, + sentry_status=SentryManager.STATUS["PERMISSION_DENIED"], ), app_commands.CommandOnCooldown: ErrorHandlerConfig( message_format="This command is on cooldown. Please wait {error.retry_after:.1f}s.", send_to_sentry=False, + sentry_status=SentryManager.STATUS["RESOURCE_EXHAUSTED"], ), app_commands.BotMissingPermissions: ErrorHandlerConfig( message_format="I lack the required permission(s): {permissions}", detail_extractor=_extract_permissions_details, log_level="WARNING", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), # Indicates a mismatch between the command signature registered with Discord # and the signature defined in the bot's code. @@ -271,11 +275,13 @@ def _extract_missing_argument_details(error: Exception) -> dict[str, Any]: message_format="Internal error: Command signature mismatch. Please report this.", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), # === Traditional Commands (discord.ext.commands) === commands.CommandError: ErrorHandlerConfig( message_format="A command error occurred: {error}", log_level="WARNING", + sentry_status=SentryManager.STATUS["UNKNOWN"], ), # CommandInvokeError wraps the actual exception raised within a prefix command. # It will be unwrapped in _handle_error, but this provides a fallback config. 
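For reference, a hedged sketch of how a future map entry could use the new
`sentry_status` field; the `commands.MessageNotFound` entry below is
illustrative only and not part of this patch:

    commands.MessageNotFound: ErrorHandlerConfig(
        message_format="Could not find message: {error.argument}.",
        send_to_sentry=False,
        sentry_status=SentryManager.STATUS["NOT_FOUND"],
    ),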
@@ -283,180 +289,217 @@ def _extract_missing_argument_details(error: Exception) -> dict[str, Any]: message_format="An internal error occurred while running the command.", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), commands.ConversionError: ErrorHandlerConfig( message_format="Failed to convert argument: {error.original}", send_to_sentry=False, + sentry_status=SentryManager.STATUS["INVALID_ARGUMENT"], ), commands.MissingRole: ErrorHandlerConfig( message_format="You need the role {roles} to use this command.", detail_extractor=_extract_missing_role_details, send_to_sentry=False, + sentry_status=SentryManager.STATUS["PERMISSION_DENIED"], ), commands.MissingAnyRole: ErrorHandlerConfig( message_format="You need one of the following roles: {roles}", detail_extractor=_extract_missing_any_role_details, send_to_sentry=False, + sentry_status=SentryManager.STATUS["PERMISSION_DENIED"], ), commands.MissingPermissions: ErrorHandlerConfig( message_format="You lack the required permission(s): {permissions}", detail_extractor=_extract_permissions_details, send_to_sentry=False, + sentry_status=SentryManager.STATUS["PERMISSION_DENIED"], ), # Error related to command flags (discord.ext.flags). commands.FlagError: ErrorHandlerConfig( message_format="Error processing command flags: {error}\nUsage: `{ctx.prefix}{usage}`", send_to_sentry=False, + sentry_status=SentryManager.STATUS["INVALID_ARGUMENT"], ), commands.BadFlagArgument: ErrorHandlerConfig( message_format="Invalid value for flag `{flag_name}`: {original_cause}\nUsage: `{ctx.prefix}{usage}`", detail_extractor=_extract_bad_flag_argument_details, send_to_sentry=False, + sentry_status=SentryManager.STATUS["INVALID_ARGUMENT"], ), commands.MissingRequiredFlag: ErrorHandlerConfig( message_format="Missing required flag: `{flag_name}`\nUsage: `{ctx.prefix}{usage}`", detail_extractor=_extract_missing_flag_details, send_to_sentry=False, + sentry_status=SentryManager.STATUS["INVALID_ARGUMENT"], ), # Generic check failure for prefix commands. commands.CheckFailure: ErrorHandlerConfig( message_format="You do not meet the requirements to run this command.", send_to_sentry=False, + sentry_status=SentryManager.STATUS["PERMISSION_DENIED"], ), commands.CommandOnCooldown: ErrorHandlerConfig( message_format="This command is on cooldown. Please wait {error.retry_after:.1f}s.", send_to_sentry=False, + sentry_status=SentryManager.STATUS["RESOURCE_EXHAUSTED"], ), commands.MissingRequiredArgument: ErrorHandlerConfig( message_format="Missing required argument: `{param_name}`\nUsage: `{ctx.prefix}{usage}`", detail_extractor=_extract_missing_argument_details, send_to_sentry=False, + sentry_status=SentryManager.STATUS["INVALID_ARGUMENT"], ), commands.TooManyArguments: ErrorHandlerConfig( message_format="You provided too many arguments.\nUsage: `{ctx.prefix}{usage}`", send_to_sentry=False, + sentry_status=SentryManager.STATUS["INVALID_ARGUMENT"], ), commands.NotOwner: ErrorHandlerConfig( message_format="This command can only be used by the bot owner.", send_to_sentry=False, + sentry_status=SentryManager.STATUS["PERMISSION_DENIED"], ), commands.BotMissingPermissions: ErrorHandlerConfig( message_format="I lack the required permission(s): {permissions}", detail_extractor=_extract_permissions_details, log_level="WARNING", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), # Generic bad argument error. 
commands.BadArgument: ErrorHandlerConfig( message_format="Invalid argument provided: {error}", send_to_sentry=False, + sentry_status=SentryManager.STATUS["INVALID_ARGUMENT"], ), # Errors for when specific Discord entities are not found. commands.MemberNotFound: ErrorHandlerConfig( message_format="Could not find member: {error.argument}.", send_to_sentry=False, + sentry_status=SentryManager.STATUS["NOT_FOUND"], ), commands.UserNotFound: ErrorHandlerConfig( message_format="Could not find user: {error.argument}.", send_to_sentry=False, + sentry_status=SentryManager.STATUS["NOT_FOUND"], ), commands.ChannelNotFound: ErrorHandlerConfig( message_format="Could not find channel: {error.argument}.", send_to_sentry=False, + sentry_status=SentryManager.STATUS["NOT_FOUND"], ), commands.RoleNotFound: ErrorHandlerConfig( message_format="Could not find role: {error.argument}.", send_to_sentry=False, + sentry_status=SentryManager.STATUS["NOT_FOUND"], ), commands.EmojiNotFound: ErrorHandlerConfig( message_format="Could not find emoji: {error.argument}.", send_to_sentry=False, + sentry_status=SentryManager.STATUS["NOT_FOUND"], ), commands.GuildNotFound: ErrorHandlerConfig( message_format="Could not find server: {error.argument}.", send_to_sentry=False, + sentry_status=SentryManager.STATUS["NOT_FOUND"], ), # === Extension/Cog Loading Errors (discord.ext.commands) === commands.ExtensionError: ErrorHandlerConfig( message_format="Extension operation failed: {error}", log_level="WARNING", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), commands.ExtensionNotLoaded: ErrorHandlerConfig( message_format="Cannot reload extension `{error.name}` - it hasn't been loaded yet.", log_level="WARNING", send_to_sentry=False, + sentry_status=SentryManager.STATUS["ERROR"], ), commands.ExtensionNotFound: ErrorHandlerConfig( message_format="Extension `{error.name}` could not be found.", log_level="WARNING", send_to_sentry=False, + sentry_status=SentryManager.STATUS["NOT_FOUND"], ), commands.ExtensionAlreadyLoaded: ErrorHandlerConfig( message_format="Extension `{error.name}` is already loaded.", log_level="INFO", send_to_sentry=False, + sentry_status=SentryManager.STATUS["INVALID_ARGUMENT"], ), commands.ExtensionFailed: ErrorHandlerConfig( message_format="Extension `{error.name}` failed to load: {error.original}", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), commands.NoEntryPointError: ErrorHandlerConfig( message_format="Extension `{error.name}` is missing a setup function.", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), # === Custom Errors (defined in tux.utils.exceptions) === PermissionLevelError: ErrorHandlerConfig( message_format="You need permission level `{error.permission}` to use this command.", send_to_sentry=False, + sentry_status=SentryManager.STATUS["PERMISSION_DENIED"], ), AppCommandPermissionLevelError: ErrorHandlerConfig( message_format="You need permission level `{error.permission}` to use this command.", send_to_sentry=False, + sentry_status=SentryManager.STATUS["PERMISSION_DENIED"], ), # === Code Execution Errors (from tux.utils.exceptions) === MissingCodeError: ErrorHandlerConfig( message_format="{error}", log_level="INFO", send_to_sentry=False, + sentry_status=SentryManager.STATUS["INVALID_ARGUMENT"], ), InvalidCodeFormatError: ErrorHandlerConfig( message_format="{error}", log_level="INFO", send_to_sentry=False, + sentry_status=SentryManager.STATUS["INVALID_ARGUMENT"], ), UnsupportedLanguageError: 
ErrorHandlerConfig( message_format="{error}", log_level="INFO", send_to_sentry=False, + sentry_status=SentryManager.STATUS["INVALID_ARGUMENT"], ), CompilationError: ErrorHandlerConfig( message_format="{error}", log_level="INFO", send_to_sentry=True, # Monitor frequency of compilation failures + sentry_status=SentryManager.STATUS["INVALID_ARGUMENT"], ), CodeExecutionError: ErrorHandlerConfig( message_format="{error}", log_level="INFO", send_to_sentry=True, # Monitor general code execution issues + sentry_status=SentryManager.STATUS["ERROR"], ), # === Discord API & Client Errors === discord.ClientException: ErrorHandlerConfig( message_format="A client-side error occurred: {error}", log_level="WARNING", send_to_sentry=True, # Monitor frequency of generic client errors + sentry_status=SentryManager.STATUS["ERROR"], ), discord.HTTPException: ErrorHandlerConfig( message_format="An HTTP error occurred while communicating with Discord: {error.status} {error.text}", log_level="WARNING", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), discord.RateLimited: ErrorHandlerConfig( message_format="We are being rate-limited by Discord. Please try again in {error.retry_after:.1f} seconds.", log_level="WARNING", send_to_sentry=True, # Track rate limits + sentry_status=SentryManager.STATUS["RESOURCE_EXHAUSTED"], ), # Generic Forbidden/NotFound often indicate deleted resources or permission issues caught by more specific exceptions. # These provide fallbacks. @@ -464,34 +507,40 @@ def _extract_missing_argument_details(error: Exception) -> dict[str, Any]: message_format="I don't have permission to perform that action. Error: {error.text}", log_level="WARNING", send_to_sentry=True, + sentry_status=SentryManager.STATUS["PERMISSION_DENIED"], ), discord.NotFound: ErrorHandlerConfig( message_format="Could not find the requested resource (it might have been deleted). Error: {error.text}", log_level="INFO", send_to_sentry=False, + sentry_status=SentryManager.STATUS["NOT_FOUND"], ), discord.DiscordServerError: ErrorHandlerConfig( message_format="Discord reported a server error ({error.status}). Please try again later. Error: {error.text}", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), # Indicates unexpected data from Discord, potentially a library or API issue. discord.InvalidData: ErrorHandlerConfig( message_format="Received invalid data from Discord. Please report this if it persists.", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), # Specific to interactions, raised if interaction.response.send_message is called more than once. discord.InteractionResponded: ErrorHandlerConfig( message_format="This interaction has already been responded to.", log_level="WARNING", # Usually indicates a logic error in command code send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), # Raised when Application ID is needed but not available (e.g., for app command sync). 
discord.MissingApplicationID: ErrorHandlerConfig( message_format="Internal setup error: Missing Application ID.", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), # === Common Python Built-in Errors === # These usually indicate internal logic errors, so show a generic message to the user @@ -500,52 +549,62 @@ def _extract_missing_argument_details(error: Exception) -> dict[str, Any]: message_format="An internal error occurred due to an invalid value.", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), TypeError: ErrorHandlerConfig( message_format="An internal error occurred due to a type mismatch.", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), KeyError: ErrorHandlerConfig( message_format="An internal error occurred while looking up data.", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), IndexError: ErrorHandlerConfig( message_format="An internal error occurred while accessing a sequence.", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), AttributeError: ErrorHandlerConfig( message_format="An internal error occurred while accessing an attribute.", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), ZeroDivisionError: ErrorHandlerConfig( message_format="An internal error occurred during a calculation (division by zero).", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), # === Additional Discord Client/Connection Errors === discord.LoginFailure: ErrorHandlerConfig( message_format="Bot authentication failed. Please check the bot token configuration.", log_level="CRITICAL", send_to_sentry=True, + sentry_status=SentryManager.STATUS["UNAUTHENTICATED"], ), discord.ConnectionClosed: ErrorHandlerConfig( message_format="Connection to Discord was closed unexpectedly. Attempting to reconnect...", log_level="WARNING", send_to_sentry=True, + sentry_status=SentryManager.STATUS["UNKNOWN"], ), discord.PrivilegedIntentsRequired: ErrorHandlerConfig( message_format="This bot requires privileged intents to function properly. Please enable them in the Discord Developer Portal.", log_level="CRITICAL", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), discord.GatewayNotFound: ErrorHandlerConfig( message_format="Could not connect to Discord's gateway. This may be a temporary issue.", log_level="ERROR", send_to_sentry=True, + sentry_status=SentryManager.STATUS["ERROR"], ), # Note: InvalidArgument, NoMoreItems, and TooManyRequests are not available in all discord.py versions # or are handled by other existing exceptions like HTTPException @@ -640,7 +699,7 @@ async def _handle_error(self, source: ContextOrInteraction, error: Exception) -> root_error = _unwrap_error(error) # --- Sentry Transaction Finalization (Added) --- - self._finish_sentry_transaction_on_error(source, root_error) + self.bot.sentry_manager.finish_transaction_on_error() # ----------------------------------------------- # Step 3: Gather context using the resolved root error. @@ -649,11 +708,8 @@ async def _handle_error(self, source: ContextOrInteraction, error: Exception) -> log_context = self._get_log_context(source, user, root_error) log_context["initial_error_type"] = type(error).__name__ # Keep initial error type for context - # Step 4: Determine handling configuration. 
-        config = ERROR_CONFIG_MAP.get(error_type)
-
         # Step 5: Format the user-facing message.
-        message = self._get_formatted_message(source, root_error, config)
+        message = self._get_formatted_message(source, root_error, ERROR_CONFIG_MAP.get(error_type))
 
         # Step 6: Create the error embed.
         embed = EmbedCreator.create_embed(
@@ -673,20 +729,29 @@ async def _handle_error(self, source: ContextOrInteraction, error: Exception) ->
                 log_context["send_error"] = str(send_exc)
                 log_context["send_error_type"] = type(send_exc).__name__
                 logger.bind(**log_context).exception("Unexpected failure during error message sending.")
-                self._capture_exception_with_context(
+                self.bot.sentry_manager.capture_exception(
                     send_exc,
-                    log_context,
-                    "ERROR",
+                    context=log_context,
+                    level="error",
                     tags={"failure_point": "send_response"},
                 )
             return
 
+        # Set command context on the Sentry scope before reporting, so the
+        # captured event is enriched with it.
+        if self.bot.sentry_manager.is_initialized:
+            self.bot.sentry_manager.set_command_context(source)
+
         # Step 8 & 9: Log and report.
-        sentry_event_id = self._log_and_report_error(root_error, error_type, log_context, config)
+        sentry_event_id = self._log_and_report_error(
+            root_error,
+            error_type,
+            log_context,
+            ERROR_CONFIG_MAP.get(error_type),
+        )
 
         # Step 10: Attempt edit with Sentry ID.
         await self._try_edit_message_with_sentry_id(sent_message, sentry_event_id, log_context)
 
     @staticmethod
     def _get_user_from_source(source: ContextOrInteraction) -> discord.User | discord.Member:
         """Helper method to consistently extract the user object from either source type."""
@@ -704,71 +769,17 @@ def _get_log_context(
         """
         Builds a dictionary containing structured context information about the error event.
 
-        Includes information about invocation type (prefix/app) and definition type (hybrid/prefix_only/app_only).
-
-        Parameters
-        ----------
-        source : ContextOrInteraction
-            The source of the error.
-        user : Union[discord.User, discord.Member]
-            The user who triggered the error.
-        error : Exception
-            The exception that occurred.
+        Parameters
+        ----------
+        source : ContextOrInteraction
+            The source of the error.
+        user : discord.User | discord.Member
+            The user who triggered the error.
+        error : Exception
+            The exception that occurred.
 
-        Returns
-        -------
-        dict[str, Any]
+        Returns
+        -------
+        dict[str, Any]
             A dictionary with context keys like user_id, command_name, guild_id, etc.
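+
+        Examples
+        --------
+        A hedged sketch from within an error listener, where ``ctx`` is the
+        command context and ``error`` is the caught exception::
+
+            log_context = self._get_log_context(ctx, ctx.author, error)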
""" - context: dict[str, Any] = { - "user_id": user.id, - "user_name": str(user), - "error": str(error), - "error_type": type(error).__name__, - } - - # Determine invocation method first using ternary operator - invoked_via_interaction: bool = ( - True if isinstance(source, discord.Interaction) else source.interaction is not None - ) - - # Set command_type based on invocation method - context["command_type"] = "app" if invoked_via_interaction else "prefix" - context["invoked_via_interaction"] = invoked_via_interaction - - # Add specific details based on source type - if isinstance(source, discord.Interaction): - context["interaction_id"] = source.id - context["channel_id"] = source.channel_id - context["guild_id"] = source.guild_id - # Determine definition type for app invocation - if source.command: - context["command_name"] = source.command.qualified_name - prefix_command = self.bot.get_command(source.command.qualified_name) - if prefix_command and isinstance(prefix_command, commands.HybridCommand | commands.HybridGroup): - context["command_definition"] = "hybrid" - else: - context["command_definition"] = "app" - else: - context["command_definition"] = "unknown" - - else: # Source is commands.Context - context["message_id"] = source.message.id - context["channel_id"] = source.channel.id - context["guild_id"] = source.guild.id if source.guild else None - # Determine definition type for prefix invocation - if source.command: - context["command_name"] = source.command.qualified_name - context["command_prefix"] = source.prefix - context["command_invoked_with"] = source.invoked_with - if isinstance(source.command, commands.HybridCommand | commands.HybridGroup): - context["command_definition"] = "hybrid" - else: - context["command_definition"] = "prefix" - else: - context["command_invoked_with"] = source.invoked_with - context["command_definition"] = "unknown" - + context = get_interaction_context(source) + context["error"] = str(error) + context["error_type"] = type(error).__name__ return context def _get_formatted_message( @@ -897,152 +908,6 @@ async def _send_error_response(self, source: ContextOrInteraction, embed: discor mention_author=False, # Avoid potentially annoying pings for errors. 
) - # --- Sentry Transaction Finalization Logic (Added) --- - def _finish_sentry_transaction_on_error(self, source: ContextOrInteraction, root_error: Exception) -> None: - """Attempts to find and finish an active Sentry transaction based on the error source.""" - if not sentry_sdk.is_initialized(): - return - - transaction: Any | None = None - transaction_id: int | None = None - command_type: str | None = None - - # Status mapping dictionaries - app_command_status_map = { - app_commands.CommandNotFound: SENTRY_STATUS_NOT_FOUND, - app_commands.CheckFailure: SENTRY_STATUS_PERMISSION_DENIED, - app_commands.TransformerError: SENTRY_STATUS_INVALID_ARGUMENT, - } - - prefix_command_status_map = { - commands.CommandNotFound: SENTRY_STATUS_NOT_FOUND, - commands.UserInputError: SENTRY_STATUS_INVALID_ARGUMENT, - commands.CheckFailure: SENTRY_STATUS_PERMISSION_DENIED, - commands.CommandOnCooldown: SENTRY_STATUS_RESOURCE_EXHAUSTED, - commands.MaxConcurrencyReached: SENTRY_STATUS_RESOURCE_EXHAUSTED, - } - - # Default status - status: str = SENTRY_STATUS_INTERNAL_ERROR - - try: - # Determine ID and type based on source - if isinstance(source, discord.Interaction): - transaction_id = source.id - command_type = "app_command" - - # Lookup status in mapping - for error_type, error_status in app_command_status_map.items(): - if isinstance(root_error, error_type): - status = error_status - break - - elif isinstance(source, commands.Context): # type: ignore - transaction_id = source.message.id - command_type = "prefix_command" - - # Lookup status in mapping - for error_type, error_status in prefix_command_status_map.items(): - if isinstance(root_error, error_type): - status = error_status - break - - else: - logger.warning(f"Unknown error source type encountered: {type(source).__name__}") - return # Cannot determine transaction ID - - # Try to pop the transaction from the bot's central store - if transaction_id is not None: # type: ignore - transaction = self.bot.active_sentry_transactions.pop(transaction_id, None) - - if transaction: - transaction.set_status(status) - transaction.finish() - logger.trace( - f"Finished Sentry transaction ({status}) for errored {command_type} (ID: {transaction_id})", - ) - - except Exception as e: - logger.exception(f"Error during Sentry transaction finalization for ID {transaction_id}: {e}") - # Capture this specific failure to Sentry if needed - sentry_sdk.capture_exception(e, hint={"context": "Sentry transaction finalization"}) - - # --- Sentry Reporting Logic --- - - @staticmethod - def _capture_exception_with_context( - error: Exception, - log_context: dict[str, Any], - level: str = "ERROR", - tags: dict[str, str] | None = None, - ) -> str | None: - """ - Safely sends an exception to Sentry, enriching it with structured context. - - This method pushes a new scope to Sentry, adds user information, the detailed - log context, the specified logging level, and any custom tags before capturing - the exception. It includes error handling to prevent Sentry SDK issues from - crashing the error handler itself. - - Parameters - ---------- - error : Exception - The exception to report. - log_context : dict[str, Any] - The dictionary of context information gathered by `_get_log_context`. - level : str, optional - The severity level for the Sentry event ('info', 'warning', 'error', etc.). Defaults to "ERROR". - tags : Optional[dict[str, str]], optional - Additional key-value tags to attach to the Sentry event. Defaults to None. 
- - Returns - ------- - Optional[str] - The Sentry event ID if capture was successful, otherwise None. - """ - event_id: str | None = None - try: - # Create an isolated scope for this Sentry event. - with sentry_sdk.push_scope() as scope: - # Add user identification. - scope.set_user({"id": log_context.get("user_id"), "username": log_context.get("user_name")}) - # Attach the detailed context dictionary under the 'discord' key. - scope.set_context("discord", log_context) - # Set the severity level of the event. - scope.level = level.lower() - - # --- Add specific tags for better filtering/searching --- # - scope.set_tag("command_name", log_context.get("command_name", "Unknown")) - scope.set_tag("command_type", log_context.get("command_type", "Unknown")) - scope.set_tag("command_definition", log_context.get("command_definition", "Unknown")) - - # Add new tag for interaction check - scope.set_tag("invoked_via_interaction", str(log_context.get("invoked_via_interaction", False)).lower()) - - # Handle potential None for guild_id (e.g., in DMs) - guild_id = log_context.get("guild_id") - scope.set_tag("guild_id", str(guild_id) if guild_id else "DM") - - # Add any custom tags provided when calling this function. - if tags: - for key, value in tags.items(): - scope.set_tag(key, value) - - # Send the exception event to Sentry and capture the returned event ID. - event_id = sentry_sdk.capture_exception(error) - - # Debug log indicating successful reporting. - if event_id: - logger.debug(f"Reported {type(error).__name__} to Sentry ({event_id})") - else: - logger.warning(f"Captured {type(error).__name__} but Sentry returned no ID.") - - except Exception as sentry_exc: - # Log if reporting to Sentry fails, but don't let it stop the error handler. - logger.error(f"Failed to report {type(error).__name__} to Sentry: {sentry_exc}") - - return event_id # Return the event ID (or None if capture failed) - def _log_and_report_error( self, root_error: Exception, @@ -1057,16 +922,24 @@ def _log_and_report_error( logger.bind(**log_context).log(config.log_level, f"Handled expected error: {error_type.__name__}") if config.send_to_sentry: # Optionally send handled errors to Sentry. - sentry_event_id = self._capture_exception_with_context( + sentry_event_id = self.bot.sentry_manager.capture_exception( root_error, - log_context, - config.log_level, + context=log_context, + level=cast(LogLevelStr, config.log_level.lower()), tags={"error_type": "handled"}, ) else: # Log unhandled errors at ERROR level and always report to Sentry. - logger.bind(**log_context).error(f"Unhandled error: {error_type.__name__}") - sentry_event_id = self._log_and_capture_unhandled(root_error, log_context) + trace = traceback.format_exception(type(root_error), root_error, root_error.__traceback__) + formatted_trace = "".join(trace) + logger.bind(**log_context).error(f"Unhandled Error: {root_error}\nTraceback:\n{formatted_trace}") + + sentry_event_id = self.bot.sentry_manager.capture_exception( + root_error, + context=log_context, + level="error", + tags={"error_type": "unhandled"}, + ) return sentry_event_id async def _try_edit_message_with_sentry_id( @@ -1127,36 +1000,6 @@ async def _try_edit_message_with_sentry_id( exc_info=unexpected_edit_exc, ) - def _log_and_capture_unhandled(self, error: Exception, log_context: dict[str, Any]) -> str | None: - """ - Handles errors not found in the `ERROR_CONFIG_MAP`. - - It logs the error with its full traceback at the ERROR level and reports - it to Sentry, tagging it as 'unhandled'. 
- - Parameters - ---------- - error : Exception - The unhandled exception. - log_context : dict[str, Any] - The context dictionary for logging and reporting. - - Returns - ------- - Optional[str] - The Sentry event ID if capture was successful, otherwise None. - """ - # Generate the formatted traceback string. - trace = traceback.format_exception(type(error), error, error.__traceback__) - formatted_trace = "".join(trace) - - # Log the error locally with full traceback and context. - logger.bind(**log_context).error(f"Unhandled Error: {error}\nTraceback:\n{formatted_trace}") - - # Report the unhandled error to Sentry with high severity. - # Directly return the result from _capture_exception_with_context. - return self._capture_exception_with_context(error, log_context, "ERROR", tags={"error_type": "unhandled"}) - # --- Command Suggestion Logic --- async def _suggest_command(self, ctx: commands.Context[Tux]) -> list[str] | None: @@ -1197,7 +1040,7 @@ async def _suggest_command(self, ctx: commands.Context[Tux]) -> list[str] | None log_context["suggest_max_dist"] = max_distance log_context["suggest_max_count"] = max_suggestions - logger.bind(**log_context).debug("Attempting command suggestion.") + logger.bind(**log_context).trace("Attempting command suggestion.") # Store potential matches: {qualified_name: min_distance} command_distances: dict[str, int] = {} @@ -1229,7 +1072,7 @@ async def _suggest_command(self, ctx: commands.Context[Tux]) -> list[str] | None # If no commands were within the distance threshold. if not command_distances: - logger.bind(**log_context).debug("No close command matches found for suggestion.") + logger.bind(**log_context).trace("No close command matches found for suggestion.") return None # Sort the found commands by distance (closest first). @@ -1239,7 +1082,7 @@ async def _suggest_command(self, ctx: commands.Context[Tux]) -> list[str] | None final_suggestions = [cmd_name for cmd_name, _ in sorted_suggestions[:max_suggestions]] log_context["suggestions_found"] = final_suggestions - logger.bind(**log_context).debug("Command suggestions generated.") + logger.bind(**log_context).trace("Command suggestions generated.") # Return the list of names, or None if the list is empty (shouldn't happen here, but safety check). return final_suggestions or None @@ -1277,7 +1120,7 @@ async def _handle_command_not_found(self, ctx: commands.Context[Tux]) -> None: # Send the suggestion message, automatically deleting it after a short period. await ctx.send(embed=embed, delete_after=SUGGESTION_DELETE_AFTER) log_context["suggestions_sent"] = suggestions - logger.bind(**log_context).info("Sent command suggestions.") + logger.bind(**log_context).debug("Sent command suggestions.") except discord.HTTPException as e: # Log if sending the suggestion message fails. log_context["send_error"] = str(e) @@ -1290,7 +1133,7 @@ async def _handle_command_not_found(self, ctx: commands.Context[Tux]) -> None: else: # Log that the command wasn't found and no suitable suggestions were generated. # No message is sent back to the user in this case to avoid unnecessary noise. 
- logger.bind(**log_context).info("Command not found, no suggestions generated.") + logger.bind(**log_context).debug("Command not found, no suggestions generated.") # --- Discord Event Listeners --- diff --git a/tux/handlers/sentry.py b/tux/handlers/sentry.py index cd849830d..476cdb14e 100644 --- a/tux/handlers/sentry.py +++ b/tux/handlers/sentry.py @@ -1,4 +1,6 @@ -from typing import Any, ClassVar +from __future__ import annotations + +from typing import Any import discord import sentry_sdk @@ -6,6 +8,7 @@ from loguru import logger from tux.bot import Tux +from tux.utils.sentry_manager import SentryManager # Type alias using PEP695 syntax type CommandObject = ( @@ -15,188 +18,43 @@ class SentryHandler(commands.Cog): """ - Handles Sentry transaction tracking for commands and interactions. + Handles Sentry transaction status for completed commands. - This cog listens for Discord events to create and complete Sentry - transactions, providing performance monitoring and error context - for both prefix commands and slash commands. + This cog listens for command completion events to set the Sentry + transaction status to 'ok', complementing the error handler which + sets failure statuses. """ - # Standard Sentry transaction statuses with ClassVar - # See: https://develop.sentry.dev/sdk/event-payloads/transaction/#transaction-status - STATUS: ClassVar[dict[str, str]] = { - "OK": "ok", - "UNKNOWN": "unknown", - "ERROR": "internal_error", - "NOT_FOUND": "not_found", - "PERMISSION_DENIED": "permission_denied", - "INVALID_ARGUMENT": "invalid_argument", - "RESOURCE_EXHAUSTED": "resource_exhausted", - "UNAUTHENTICATED": "unauthenticated", - "CANCELLED": "cancelled", - } - - def __init__(self, bot: Tux) -> None: - """Initialize the Sentry handler cog. - - Parameters - ---------- - bot : Tux - The bot instance to attach the listeners to - """ - self.bot = bot - logger.info("Sentry handler initialized") - - def _is_sentry_available(self) -> bool: - """Check if Sentry is initialized and available for use. - - Returns - ------- - bool - True if Sentry is initialized, False otherwise - """ - return sentry_sdk.is_initialized() - - def _create_transaction( - self, - operation: str, - name: str, - description: str, - tags: dict[str, Any], - ) -> Any | None: - """Create a Sentry transaction with the given parameters. - - Parameters - ---------- - operation : str - The operation type (e.g., "discord.command") - name : str - The name of the transaction - description : str - A description of the transaction - tags : dict[str, Any] - Tags to attach to the transaction - - Returns - ------- - Optional[Any] - The created transaction or None if Sentry is not initialized - """ - if not self._is_sentry_available(): - return None - - try: - transaction = sentry_sdk.start_transaction(op=operation, name=name, description=description) - - # Add all tags to the transaction - for key, value in tags.items(): - transaction.set_tag(key, value) - except Exception as e: - logger.error(f"Error creating Sentry transaction: {e}") - sentry_sdk.capture_exception(e) - return None - else: - return transaction - - def _finish_transaction(self, object_id: int, status: str = STATUS["OK"]) -> None: - """Finish a stored transaction with the given status. 
- - Parameters - ---------- - object_id : int - The ID of the interaction or message - status : str - The status to set on the transaction - """ - if not self._is_sentry_available(): - return - - if transaction := self.bot.active_sentry_transactions.pop(object_id, None): - transaction.set_status(status) - transaction.finish() - logger.trace(f"Finished Sentry transaction ({status}) for {transaction.name}") - - @commands.Cog.listener() - async def on_command(self, ctx: commands.Context[Tux]) -> None: + def __init__(self, sentry_manager: SentryManager) -> None: """ - Start a Sentry transaction for a prefix command. + Initialize the Sentry handler cog. Parameters ---------- - ctx : commands.Context[Tux] - The command context + sentry_manager : SentryManager + The Sentry manager instance. """ - if not self._is_sentry_available(): - return - - if command_name := (ctx.command.qualified_name if ctx.command else "Unknown Command"): - tags = { - "discord.command.name": command_name, - "discord.guild.id": str(ctx.guild.id) if ctx.guild else "DM", - "discord.channel.id": ctx.channel.id, - "discord.user.id": ctx.author.id, - "discord.message.id": ctx.message.id, - "discord.command.type": "prefix", - } - - if transaction := self._create_transaction( - operation="discord.command", - name=command_name, - description=ctx.message.content, - tags=tags, - ): - self.bot.active_sentry_transactions[ctx.message.id] = transaction - logger.trace(f"Started transaction for prefix command: {command_name}") + self.sentry_manager = sentry_manager + logger.info("Sentry handler initialized") @commands.Cog.listener() async def on_command_completion(self, ctx: commands.Context[Tux]) -> None: """ - Finish the Sentry transaction for a completed prefix command. + Sets the Sentry transaction status to 'ok' for a completed prefix command. Parameters ---------- ctx : commands.Context[Tux] The command context """ - self._finish_transaction(ctx.message.id, self.STATUS["OK"]) - - @commands.Cog.listener() - async def on_interaction(self, interaction: discord.Interaction) -> None: - """ - Start a Sentry transaction for application command interactions. - - Parameters - ---------- - interaction : discord.Interaction - The interaction object - """ - if not self._is_sentry_available() or interaction.type != discord.InteractionType.application_command: - return - - if command_name := (interaction.command.qualified_name if interaction.command else "Unknown App Command"): - tags = { - "discord.command.name": command_name, - "discord.guild.id": str(interaction.guild_id) if interaction.guild_id else "DM", - "discord.channel.id": interaction.channel_id, - "discord.user.id": interaction.user.id, - "discord.interaction.id": interaction.id, - "discord.interaction.type": interaction.type.name, - "discord.command.type": "slash", - } - - if transaction := self._create_transaction( - operation="discord.app_command", - name=command_name, - description=f"/{command_name}", - tags=tags, - ): - self.bot.active_sentry_transactions[interaction.id] = transaction - logger.trace(f"Started transaction for app command: {command_name}") + if self.sentry_manager.is_initialized and (span := sentry_sdk.get_current_span()): + span.set_status(self.sentry_manager.STATUS["OK"]) + logger.trace(f"Set Sentry span status to 'ok' for command: {ctx.command}") @commands.Cog.listener() async def on_app_command_completion(self, interaction: discord.Interaction, command: CommandObject) -> None: """ - Finish the Sentry transaction for a completed application command. 
+ Sets the Sentry transaction status to 'ok' for a completed application command. Parameters ---------- @@ -205,9 +63,11 @@ async def on_app_command_completion(self, interaction: discord.Interaction, comm command : CommandObject The command that was completed """ - self._finish_transaction(interaction.id, self.STATUS["OK"]) + if self.sentry_manager.is_initialized and (span := sentry_sdk.get_current_span()): + span.set_status(self.sentry_manager.STATUS["OK"]) + logger.trace(f"Set Sentry span status to 'ok' for app command: {command.name}") async def setup(bot: Tux) -> None: """Add the SentryHandler cog to the bot.""" - await bot.add_cog(SentryHandler(bot)) + await bot.add_cog(SentryHandler(bot.sentry_manager)) diff --git a/tux/utils/sentry.py b/tux/utils/sentry.py deleted file mode 100644 index 1108b9825..000000000 --- a/tux/utils/sentry.py +++ /dev/null @@ -1,291 +0,0 @@ -""" -Sentry instrumentation utilities for tracing and performance monitoring. - -This module provides decorators and context managers for instrumenting -code with Sentry transactions and spans, simplifying the addition of -performance monitoring and error tracking. -""" - -import asyncio -import functools -import time -import traceback -from collections.abc import Callable, Generator -from contextlib import contextmanager -from typing import Any, ParamSpec, TypeVar, cast - -import sentry_sdk - -# Type variables for better type hints with generic functions -P = ParamSpec("P") -T = TypeVar("T") -R = TypeVar("R") - - -class DummySpan: - """A dummy span object for when Sentry is not initialized.""" - - def set_tag(self, *args: Any, **kwargs: Any) -> "DummySpan": - return self - - def set_data(self, *args: Any, **kwargs: Any) -> "DummySpan": - return self - - def set_status(self, *args: Any, **kwargs: Any) -> "DummySpan": - return self - - def set_name(self, name: str) -> "DummySpan": - return self - - -class DummyTransaction(DummySpan): - """A dummy transaction object for when Sentry is not initialized.""" - - -def safe_set_name(obj: Any, name: str) -> None: - """ - Safely set the name on a span or transaction object. - - Parameters - ---------- - obj : Any - The span or transaction object - name : str - The name to set - """ - if hasattr(obj, "set_name"): - # Use getattr to avoid static type checking issues - set_name_func = obj.set_name - set_name_func(name) - - -def transaction( - op: str, - name: str | None = None, - description: str | None = None, -) -> Callable[[Callable[P, R]], Callable[P, R]]: - """ - Decorator to wrap a function with a Sentry transaction. - - Parameters - ---------- - op : str - The operation name for the transaction. - name : Optional[str] - The name for the transaction. Defaults to the function name. - description : Optional[str] - A description of what the transaction is doing. - - Returns - ------- - Callable - The decorated function. 
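
For the new completion listeners above to have anything to stamp, something upstream must already have opened a span for the command (the tracing utilities added in this series, or Sentry's own integrations); the cog merely marks it successful. A minimal sketch of that hand-off, using a disabled DSN and hypothetical op/name values:

    import sentry_sdk

    sentry_sdk.init(dsn="", traces_sample_rate=1.0)  # empty DSN: nothing leaves the process

    with sentry_sdk.start_transaction(op="command.run", name="command.ping"):
        # ... command body would run here ...
        if span := sentry_sdk.get_current_span():
            span.set_status("ok")  # the same call the completion listeners make
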
- """ - - def decorator(func: Callable[P, R]) -> Callable[P, R]: - if asyncio.iscoroutinefunction(func): - - @functools.wraps(func) - async def async_transaction_wrapper(*args: P.args, **kwargs: P.kwargs) -> R: - transaction_name = name or f"{func.__module__}.{func.__qualname__}" - start_time = time.perf_counter() - - if not sentry_sdk.is_initialized(): - return await func(*args, **kwargs) - - with sentry_sdk.start_transaction( - op=op, - name=transaction_name, - description=description or f"Executing {func.__qualname__}", - ) as transaction_obj: - try: - result = await func(*args, **kwargs) - except Exception as e: - transaction_obj.set_status("internal_error") - transaction_obj.set_data("error", str(e)) - transaction_obj.set_data("traceback", traceback.format_exc()) - raise - else: - transaction_obj.set_status("ok") - return result - finally: - transaction_obj.set_data("duration_ms", (time.perf_counter() - start_time) * 1000) - - return cast(Callable[P, R], async_transaction_wrapper) - - @functools.wraps(func) - def sync_transaction_wrapper(*args: P.args, **kwargs: P.kwargs) -> R: - transaction_name = name or f"{func.__module__}.{func.__qualname__}" - start_time = time.perf_counter() - - if not sentry_sdk.is_initialized(): - return func(*args, **kwargs) - - with sentry_sdk.start_transaction( - op=op, - name=transaction_name, - description=description or f"Executing {func.__qualname__}", - ) as transaction_obj: - try: - result = func(*args, **kwargs) - except Exception as e: - transaction_obj.set_status("internal_error") - transaction_obj.set_data("error", str(e)) - transaction_obj.set_data("traceback", traceback.format_exc()) - raise - else: - transaction_obj.set_status("ok") - return result - finally: - transaction_obj.set_data("duration_ms", (time.perf_counter() - start_time) * 1000) - - return sync_transaction_wrapper - - return decorator - - -def span(op: str, description: str | None = None) -> Callable[[Callable[P, R]], Callable[P, R]]: - """ - Decorator to wrap a function with a Sentry span. - - Parameters - ---------- - op : str - The operation name for the span. - description : Optional[str] - A description of what the span is doing. - - Returns - ------- - Callable - The decorated function. 
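
Call sites consumed the decorator above without caring whether the wrapped function was sync or async, and per the diffstat this shape lives on in tux/utils/tracing.py. A usage sketch, assuming the decorator kept its import name and (op, name, description) signature after the move:

    import asyncio

    from tux.utils.tracing import transaction  # assumed post-move import path

    @transaction(op="task.run", name="task.cleanup", description="Purge expired rows")
    async def cleanup() -> None:
        await asyncio.sleep(0)  # stand-in for real work

    asyncio.run(cleanup())  # wrapped run records status and duration_ms on the transaction
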
- """ - - def decorator(func: Callable[P, R]) -> Callable[P, R]: - if asyncio.iscoroutinefunction(func): - - @functools.wraps(func) - async def async_span_wrapper(*args: P.args, **kwargs: P.kwargs) -> R: - span_description = description or f"Executing {func.__qualname__}" - start_time = time.perf_counter() - - if not sentry_sdk.is_initialized(): - return await func(*args, **kwargs) - - with sentry_sdk.start_span(op=op, description=span_description) as span_obj: - try: - # Use the helper function to safely set name if available - safe_set_name(span_obj, func.__qualname__) - - result = await func(*args, **kwargs) - except Exception as e: - span_obj.set_status("internal_error") - span_obj.set_data("error", str(e)) - span_obj.set_data("traceback", traceback.format_exc()) - raise - else: - span_obj.set_status("ok") - return result - finally: - span_obj.set_data("duration_ms", (time.perf_counter() - start_time) * 1000) - - return cast(Callable[P, R], async_span_wrapper) - - @functools.wraps(func) - def sync_span_wrapper(*args: P.args, **kwargs: P.kwargs) -> R: - span_description = description or f"Executing {func.__qualname__}" - start_time = time.perf_counter() - - if not sentry_sdk.is_initialized(): - return func(*args, **kwargs) - - with sentry_sdk.start_span(op=op, description=span_description) as span_obj: - try: - # Use the helper function to safely set name if available - safe_set_name(span_obj, func.__qualname__) - - result = func(*args, **kwargs) - except Exception as e: - span_obj.set_status("internal_error") - span_obj.set_data("error", str(e)) - span_obj.set_data("traceback", traceback.format_exc()) - raise - else: - span_obj.set_status("ok") - return result - finally: - span_obj.set_data("duration_ms", (time.perf_counter() - start_time) * 1000) - - return sync_span_wrapper - - return decorator - - -@contextmanager -def start_span(op: str, description: str = "") -> Generator[DummySpan | Any]: - """ - Context manager for creating a Sentry span. - - Parameters - ---------- - op : str - The operation name for the span. - description : str - A description of what the span is doing. - - Yields - ------ - Union[DummySpan, Any] - The Sentry span object or a dummy object if Sentry is not initialized. - """ - start_time = time.perf_counter() - - if not sentry_sdk.is_initialized(): - # Create a dummy context if Sentry is not available - dummy = DummySpan() - try: - yield dummy - finally: - pass - else: - with sentry_sdk.start_span(op=op, description=description) as span: - try: - yield span - finally: - span.set_data("duration_ms", (time.perf_counter() - start_time) * 1000) - - -@contextmanager -def start_transaction(op: str, name: str, description: str = "") -> Generator[DummyTransaction | Any]: - """ - Context manager for creating a Sentry transaction. - - Parameters - ---------- - op : str - The operation name for the transaction. - name : str - The name for the transaction. - description : str - A description of what the transaction is doing. - - Yields - ------ - Union[DummyTransaction, Any] - The Sentry transaction object or a dummy object if Sentry is not initialized. 
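
Because both context managers above yield inert dummy objects when Sentry is absent, call sites never have to guard on sentry_sdk.is_initialized() themselves. A minimal usage sketch with hypothetical operation names (the helpers' new home in tux/utils/tracing.py is assumed from the diffstat):

    from tux.utils.tracing import start_span, start_transaction  # assumed post-move path

    with start_transaction(op="bot.startup", name="Load extensions"):
        with start_span(op="db.connect", description="Open database pool") as span:
            span.set_tag("pool_size", 10)  # no-op on the dummy, a real tag under Sentry
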
- """ - start_time = time.perf_counter() - - if not sentry_sdk.is_initialized(): - # Create a dummy context if Sentry is not available - dummy = DummyTransaction() - try: - yield dummy - finally: - pass - else: - with sentry_sdk.start_transaction(op=op, name=name, description=description) as transaction: - try: - yield transaction - finally: - transaction.set_data("duration_ms", (time.perf_counter() - start_time) * 1000) From 12c9d7de31c590bba1eca1a738de42ee22306e31 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 01:47:33 -0400 Subject: [PATCH 03/34] fix: add missing get_prefix function to resolve type checking errors - Add get_prefix function to tux/app.py for dynamic guild prefix resolution - Update bot initialization to use get_prefix as command_prefix - Resolves remaining dependency issues from branch splitting --- tux/app.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tux/app.py b/tux/app.py index 5059ab790..3bd954bc2 100644 --- a/tux/app.py +++ b/tux/app.py @@ -24,6 +24,19 @@ from tux.utils.sentry_manager import SentryManager +async def get_prefix(bot: Tux, message: discord.Message) -> list[str]: + """Resolve the command prefix for a guild or use the default prefix.""" + prefix: str | None = None + if message.guild: + try: + from tux.database.controllers import DatabaseController # noqa: PLC0415 + + prefix = await DatabaseController().guild_config.get_guild_prefix(message.guild.id) + except Exception as e: + logger.error(f"Error getting guild prefix: {e}") + return [prefix or CONFIG.DEFAULT_PREFIX] + + class TuxApp: """ Orchestrates the startup, shutdown, and environment for the Tux bot. @@ -83,6 +96,7 @@ async def start(self) -> None: # Instantiate the main bot class with all necessary parameters. self.bot = Tux( + command_prefix=get_prefix, strip_after_prefix=True, case_insensitive=True, intents=discord.Intents.all(), From 9850ff45774160539b5884adb6bb94ce7ef83f36 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 01:49:03 -0400 Subject: [PATCH 04/34] fix: suppress type checker warnings for discord.py tasks.Loop.coro attribute - Add type: ignore comments for task_loop.coro access - Resolves reportUnknownMemberType error in task_manager.py - The .coro attribute exists at runtime but isn't in discord.py type stubs --- tux/utils/task_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tux/utils/task_manager.py b/tux/utils/task_manager.py index e5d6c065f..24b5f0d0e 100644 --- a/tux/utils/task_manager.py +++ b/tux/utils/task_manager.py @@ -169,9 +169,9 @@ def setup_task_instrumentation(self) -> None: try: # We are confident .coro exists and is a callable coroutine on a tasks.Loop instance. # The type checker struggles with this dynamic attribute from the discord.py library. - original_coro = cast(Callable[..., Coroutine[Any, Any, None]], task_loop.coro) + original_coro = cast(Callable[..., Coroutine[Any, Any, None]], task_loop.coro) # type: ignore[attr-defined] decorated_loop = transaction(op="task.run", name=f"task.{task_name}")(original_coro) - task_loop.coro = decorated_loop + task_loop.coro = decorated_loop # type: ignore[attr-defined] logger.debug(f"Instrumented task: {task_name}") except AttributeError: logger.warning(f"Could not find a 'coro' on task {task_name}. 
Skipping instrumentation.") From cd34a6ae400a8b215249e7580607da04885d5079 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 02:56:43 -0400 Subject: [PATCH 05/34] fix: simplify operation handling in BaseController - Refactor operation execution to directly return the result of the awaited operation - Improve readability by reducing unnecessary variable assignment --- tux/database/controllers/base.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tux/database/controllers/base.py b/tux/database/controllers/base.py index 24d7a909e..96450bc09 100644 --- a/tux/database/controllers/base.py +++ b/tux/database/controllers/base.py @@ -112,8 +112,7 @@ async def _execute_query( # Remove span creation here to avoid duplication with controller-level spans # Controller methods should handle their own tracing for meaningful operations try: - result = await operation() - return result + return await operation() except Exception as e: logger.error(f"{error_msg}: {e}") raise From 138719ded7c013a114db1d7261ae6177805fe6d3 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 02:56:50 -0400 Subject: [PATCH 06/34] refactor: enhance SentryManager with operation mapping and span filtering - Introduced static methods for mapping database operations and transactions to standardized types. - Implemented span filtering and grouping to improve trace readability and reduce noise. - Updated transaction handling to streamline noise suppression and enhance reporting clarity. --- tux/utils/sentry_manager.py | 234 ++++++++++++++++++++++-------------- 1 file changed, 146 insertions(+), 88 deletions(-) diff --git a/tux/utils/sentry_manager.py b/tux/utils/sentry_manager.py index ca124fff3..7fef4807d 100644 --- a/tux/utils/sentry_manager.py +++ b/tux/utils/sentry_manager.py @@ -115,6 +115,110 @@ def _before_send(event: Event, hint: Hint) -> Event | None: return event + @staticmethod + def _get_span_operation_mapping(op: str) -> str: + """ + Map database controller operations to standardized operation types. + + Parameters + ---------- + op : str + The original operation name + + Returns + ------- + str + The standardized operation type + """ + if not op.startswith("db.controller."): + return op + + # Use dictionary lookup instead of if/elif chain + operation_mapping = { + "get_": "db.read", + "find_": "db.read", + "create_": "db.create", + "update_": "db.update", + "increment_": "db.update", + "delete_": "db.delete", + "count_": "db.count", + } + + return next((mapped_op for prefix, mapped_op in operation_mapping.items() if prefix in op), "db.other") + + @staticmethod + def _get_transaction_operation_mapping(transaction_name: str) -> str: + """ + Map database controller transaction names to standardized operation types. 
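
The lookup above collapses dozens of controller method names into a handful of standardized categories: the first matching prefix wins, anything unmatched falls back to "db.other", and non-controller ops pass through untouched. A quick behaviour check, assuming SentryManager is importable as in the diff header:

    from tux.utils.sentry_manager import SentryManager

    assert SentryManager._get_span_operation_mapping("db.controller.get_guild_prefix") == "db.read"
    assert SentryManager._get_span_operation_mapping("db.controller.increment_xp") == "db.update"
    assert SentryManager._get_span_operation_mapping("db.controller.vacuum") == "db.other"
    assert SentryManager._get_span_operation_mapping("http.client") == "http.client"  # passes through
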
+ + Parameters + ---------- + transaction_name : str + The original transaction name + + Returns + ------- + str + The standardized transaction operation type + """ + if not transaction_name.startswith("db.controller."): + return transaction_name + + # Use dictionary lookup instead of if/elif chain + operation_mapping = { + "get_": "db.controller.read_operation", + "find_": "db.controller.read_operation", + "create_": "db.controller.create_operation", + "update_": "db.controller.update_operation", + "increment_": "db.controller.update_operation", + "delete_": "db.controller.delete_operation", + "count_": "db.controller.count_operation", + } + + return next( + (mapped_op for prefix, mapped_op in operation_mapping.items() if prefix in transaction_name), + "db.controller.other_operation", + ) + + @staticmethod + def _filter_and_group_spans(spans: list[dict[str, Any]]) -> list[dict[str, Any]]: + """ + Filter and group spans to reduce noise and improve trace readability. + + Parameters + ---------- + spans : list[dict[str, Any]] + List of spans to filter and group + + Returns + ------- + list[dict[str, Any]] + Filtered and grouped spans + """ + filtered_spans: list[dict[str, Any]] = [] + + for span in spans: + op = span.get("op", "") + description = span.get("description", "") + + # Filter out internal Prisma HTTP requests to the query engine + if op == "http.client" and "localhost" in description: + continue + + # Filter out noisy, low-level asyncio/library functions + if "staggered_race" in description: + continue + + # Group database controller operations for cleaner reporting + if "db.controller." in op: + span["op"] = SentryManager._get_span_operation_mapping(op) + # Normalize description for grouped DB operations + span["description"] = f"DB {str(span['op']).split('.')[-1].capitalize()} Operation" + + filtered_spans.append(span) + + return filtered_spans + @staticmethod def _before_send_transaction(event: Event, hint: Hint) -> Event | None: """ @@ -122,78 +226,33 @@ def _before_send_transaction(event: Event, hint: Hint) -> Event | None: This helps reduce noise and improve transaction grouping. """ - if event.get("type") == "transaction": - transaction_name = event.get("transaction", "") - - # Filter out noisy or uninteresting transactions entirely - if any( - op in transaction_name - for op in [ - "safe_get_attr", - "connect_or_create", - "_build_", - "_add_include", - "CogLoader.load_cogs_from_folder", # Startup noise - "CogLoader Setup", # More startup noise - "Bot shutdown process", # Shutdown noise - ] - ): - return None + if event.get("type") != "transaction": + return event + + transaction_name = event.get("transaction", "") + + # Filter out noisy or uninteresting transactions entirely + noisy_operations = [ + "safe_get_attr", + "connect_or_create", + "_build_", + "_add_include", + "CogLoader.load_cogs_from_folder", # Startup noise + "CogLoader Setup", # More startup noise + "Bot shutdown process", # Shutdown noise + ] + + if any(op in transaction_name for op in noisy_operations): + return None - # Filter spans to reduce noise and group operations. - # This provides more meaningful and actionable traces. - if "spans" in event: - spans = cast(list[dict[str, Any]], event.get("spans") or []) - filtered_spans: list[dict[str, Any]] = [] - for span in spans: - op = span.get("op", "") - description = span.get("description", "") - - # Filter out internal Prisma HTTP requests to the query engine. - # These are implementation details and not useful for performance monitoring. 
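
End to end, the filter drops Prisma's localhost query-engine chatter and rewrites controller spans into their grouped form, so a trace reads as a few meaningful DB operations instead of raw method calls. A small demonstration under the same import assumption:

    from tux.utils.sentry_manager import SentryManager

    raw_spans = [
        {"op": "http.client", "description": "POST http://localhost:4467/"},  # Prisma engine noise
        {"op": "db.controller.find_case", "description": "find_case"},
    ]
    print(SentryManager._filter_and_group_spans(raw_spans))
    # -> [{'op': 'db.read', 'description': 'DB Read Operation'}]
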
- if op == "http.client" and "localhost" in description: - continue - - # Filter out noisy, low-level asyncio/library functions. - if "staggered_race" in description: - continue - - # Group database controller operations for cleaner reporting. - if "db.controller." in op: - if "get_" in op or "find_" in op: - span["op"] = "db.read" - elif "create_" in op: - span["op"] = "db.create" - elif "update_" in op or "increment_" in op: - span["op"] = "db.update" - elif "delete_" in op: - span["op"] = "db.delete" - elif "count_" in op: - span["op"] = "db.count" - else: - span["op"] = "db.other" - # Normalize description for grouped DB operations - span["description"] = f"DB {str(span['op']).split('.')[-1].capitalize()} Operation" - - filtered_spans.append(span) - event["spans"] = filtered_spans - - # Group all database controller transactions by type for cleaner reporting. - # This is a fallback for transactions that are purely DB operations. - if "db.controller." in transaction_name: - # Extract operation type and normalize - if "get_" in transaction_name or "find_" in transaction_name: - event["transaction"] = "db.controller.read_operation" - elif "create_" in transaction_name: - event["transaction"] = "db.controller.create_operation" - elif "update_" in transaction_name or "increment_" in transaction_name: - event["transaction"] = "db.controller.update_operation" - elif "delete_" in transaction_name: - event["transaction"] = "db.controller.delete_operation" - elif "count_" in transaction_name: - event["transaction"] = "db.controller.count_operation" - else: - event["transaction"] = "db.controller.other_operation" + # Filter spans to reduce noise and group operations + if "spans" in event: + spans = cast(list[dict[str, Any]], event.get("spans") or []) + event["spans"] = SentryManager._filter_and_group_spans(spans) + + # Group all database controller transactions by type for cleaner reporting + if "db.controller." in transaction_name: + event["transaction"] = SentryManager._get_transaction_operation_mapping(transaction_name) return event @@ -215,24 +274,23 @@ def _traces_sampler(sampling_context: dict[str, Any]) -> float: # Get transaction name for decision making transaction_name = sampling_context.get("transaction_context", {}).get("name", "") - # Very aggressive sampling in production to reduce noise - if get_current_env() not in ("dev", "development"): - # Almost no sampling for database operations (they're very frequent) - if "db.controller" in transaction_name: - return 0.01 # 1% sampling for DB operations - if "db.query" in transaction_name: - return 0.005 # 0.5% sampling for low-level DB queries - if "command" in transaction_name: - # Normal sampling for user commands (more important) - return 0.1 # 10% sampling for commands - if "cog." 
in transaction_name: - # Very low sampling for cog operations - return 0.02 # 2% sampling for cog ops - # Low sampling for other operations - return 0.05 # 5% sampling for other ops - # Full sampling in development for debugging - return 1.0 + if get_current_env() in ("dev", "development"): + return 1.0 + + # Production sampling rates using dictionary lookup + sampling_rates = { + "db.controller": 0.01, # 1% sampling for DB operations + "db.query": 0.005, # 0.5% sampling for low-level DB queries + "command": 0.1, # 10% sampling for commands + "cog.": 0.02, # 2% sampling for cog ops + } + + # Check for matching patterns and return appropriate sampling rate + return next( + (rate for pattern, rate in sampling_rates.items() if pattern in transaction_name), + 0.05, # Default sampling rate for other operations + ) @staticmethod def setup() -> None: From 2828d4153ac049bcda2f0c25ec201fc5a0954952 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 02:56:56 -0400 Subject: [PATCH 07/34] refactor: streamline tracing logic in enhanced_span function - Simplified the conditional check for skipping tracing in production environments. - Improved readability by restructuring the code for command instrumentation in the instrument_bot_commands function. --- tux/utils/tracing.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tux/utils/tracing.py b/tux/utils/tracing.py index 4233a6b88..86ca2c27a 100644 --- a/tux/utils/tracing.py +++ b/tux/utils/tracing.py @@ -473,10 +473,11 @@ def enhanced_span(op: str, name: str = "", **initial_data: Any) -> Generator[Dum # In production, skip tracing for certain frequent operations env = initial_data.get("environment", "development") - if env not in ("dev", "development"): - if any(skip_term in name.lower() for skip_term in ["safe_get_attr", "connect_or_create"]): - yield DummySpan() - return + if env not in ("dev", "development") and any( + skip_term in name.lower() for skip_term in ["safe_get_attr", "connect_or_create"] + ): + yield DummySpan() + return with start_span(op, name) as span: # Set initial data if provided @@ -504,10 +505,10 @@ def instrument_bot_commands(bot: commands.Bot) -> None: bot : commands.Bot The instance of the bot whose commands should be instrumented. 
""" - for command in bot.walk_commands(): - # The operation for commands is standardized as `command.run` - op = "command.run" + # The operation for commands is standardized as `command.run` + op = "command.run" + for command in bot.walk_commands(): # The transaction name is the full command name (e.g., "snippet get") transaction_name = f"command.{command.qualified_name}" From f8106d5450064b6d0f70feafebba907d73f5fe36 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 02:57:02 -0400 Subject: [PATCH 08/34] chore(deps): update aiohttp to version 3.12.14 and xattr to version 1.2.0 in poetry.lock --- poetry.lock | 837 ++++++++++++++++++++++++++-------------------------- 1 file changed, 419 insertions(+), 418 deletions(-) diff --git a/poetry.lock b/poetry.lock index a4b306d01..45a0bec10 100644 --- a/poetry.lock +++ b/poetry.lock @@ -58,103 +58,103 @@ files = [ [[package]] name = "aiohttp" -version = "3.12.13" +version = "3.12.14" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "aiohttp-3.12.13-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5421af8f22a98f640261ee48aae3a37f0c41371e99412d55eaf2f8a46d5dad29"}, - {file = "aiohttp-3.12.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fcda86f6cb318ba36ed8f1396a6a4a3fd8f856f84d426584392083d10da4de0"}, - {file = "aiohttp-3.12.13-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4cd71c9fb92aceb5a23c4c39d8ecc80389c178eba9feab77f19274843eb9412d"}, - {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34ebf1aca12845066c963016655dac897651e1544f22a34c9b461ac3b4b1d3aa"}, - {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:893a4639694c5b7edd4bdd8141be296042b6806e27cc1d794e585c43010cc294"}, - {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:663d8ee3ffb3494502ebcccb49078faddbb84c1d870f9c1dd5a29e85d1f747ce"}, - {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0f8f6a85a0006ae2709aa4ce05749ba2cdcb4b43d6c21a16c8517c16593aabe"}, - {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1582745eb63df267c92d8b61ca655a0ce62105ef62542c00a74590f306be8cb5"}, - {file = "aiohttp-3.12.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d59227776ee2aa64226f7e086638baa645f4b044f2947dbf85c76ab11dcba073"}, - {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06b07c418bde1c8e737d8fa67741072bd3f5b0fb66cf8c0655172188c17e5fa6"}, - {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:9445c1842680efac0f81d272fd8db7163acfcc2b1436e3f420f4c9a9c5a50795"}, - {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:09c4767af0b0b98c724f5d47f2bf33395c8986995b0a9dab0575ca81a554a8c0"}, - {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f3854fbde7a465318ad8d3fc5bef8f059e6d0a87e71a0d3360bb56c0bf87b18a"}, - {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2332b4c361c05ecd381edb99e2a33733f3db906739a83a483974b3df70a51b40"}, - {file = "aiohttp-3.12.13-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1561db63fa1b658cd94325d303933553ea7d89ae09ff21cc3bcd41b8521fbbb6"}, - {file = "aiohttp-3.12.13-cp310-cp310-win32.whl", 
hash = "sha256:a0be857f0b35177ba09d7c472825d1b711d11c6d0e8a2052804e3b93166de1ad"}, - {file = "aiohttp-3.12.13-cp310-cp310-win_amd64.whl", hash = "sha256:fcc30ad4fb5cb41a33953292d45f54ef4066746d625992aeac33b8c681173178"}, - {file = "aiohttp-3.12.13-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7c229b1437aa2576b99384e4be668af1db84b31a45305d02f61f5497cfa6f60c"}, - {file = "aiohttp-3.12.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:04076d8c63471e51e3689c93940775dc3d12d855c0c80d18ac5a1c68f0904358"}, - {file = "aiohttp-3.12.13-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:55683615813ce3601640cfaa1041174dc956d28ba0511c8cbd75273eb0587014"}, - {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:921bc91e602d7506d37643e77819cb0b840d4ebb5f8d6408423af3d3bf79a7b7"}, - {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e72d17fe0974ddeae8ed86db297e23dba39c7ac36d84acdbb53df2e18505a013"}, - {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0653d15587909a52e024a261943cf1c5bdc69acb71f411b0dd5966d065a51a47"}, - {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a77b48997c66722c65e157c06c74332cdf9c7ad00494b85ec43f324e5c5a9b9a"}, - {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6946bae55fd36cfb8e4092c921075cde029c71c7cb571d72f1079d1e4e013bc"}, - {file = "aiohttp-3.12.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f95db8c8b219bcf294a53742c7bda49b80ceb9d577c8e7aa075612b7f39ffb7"}, - {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:03d5eb3cfb4949ab4c74822fb3326cd9655c2b9fe22e4257e2100d44215b2e2b"}, - {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:6383dd0ffa15515283c26cbf41ac8e6705aab54b4cbb77bdb8935a713a89bee9"}, - {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6548a411bc8219b45ba2577716493aa63b12803d1e5dc70508c539d0db8dbf5a"}, - {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:81b0fcbfe59a4ca41dc8f635c2a4a71e63f75168cc91026c61be665945739e2d"}, - {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:6a83797a0174e7995e5edce9dcecc517c642eb43bc3cba296d4512edf346eee2"}, - {file = "aiohttp-3.12.13-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a5734d8469a5633a4e9ffdf9983ff7cdb512524645c7a3d4bc8a3de45b935ac3"}, - {file = "aiohttp-3.12.13-cp311-cp311-win32.whl", hash = "sha256:fef8d50dfa482925bb6b4c208b40d8e9fa54cecba923dc65b825a72eed9a5dbd"}, - {file = "aiohttp-3.12.13-cp311-cp311-win_amd64.whl", hash = "sha256:9a27da9c3b5ed9d04c36ad2df65b38a96a37e9cfba6f1381b842d05d98e6afe9"}, - {file = "aiohttp-3.12.13-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0aa580cf80558557285b49452151b9c69f2fa3ad94c5c9e76e684719a8791b73"}, - {file = "aiohttp-3.12.13-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b103a7e414b57e6939cc4dece8e282cfb22043efd0c7298044f6594cf83ab347"}, - {file = "aiohttp-3.12.13-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78f64e748e9e741d2eccff9597d09fb3cd962210e5b5716047cbb646dc8fe06f"}, - {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29c955989bf4c696d2ededc6b0ccb85a73623ae6e112439398935362bacfaaf6"}, - {file = 
"aiohttp-3.12.13-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d640191016763fab76072c87d8854a19e8e65d7a6fcfcbf017926bdbbb30a7e5"}, - {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4dc507481266b410dede95dd9f26c8d6f5a14315372cc48a6e43eac652237d9b"}, - {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8a94daa873465d518db073bd95d75f14302e0208a08e8c942b2f3f1c07288a75"}, - {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f52420cde4ce0bb9425a375d95577fe082cb5721ecb61da3049b55189e4e6"}, - {file = "aiohttp-3.12.13-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f7df1f620ec40f1a7fbcb99ea17d7326ea6996715e78f71a1c9a021e31b96b8"}, - {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3062d4ad53b36e17796dce1c0d6da0ad27a015c321e663657ba1cc7659cfc710"}, - {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:8605e22d2a86b8e51ffb5253d9045ea73683d92d47c0b1438e11a359bdb94462"}, - {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:54fbbe6beafc2820de71ece2198458a711e224e116efefa01b7969f3e2b3ddae"}, - {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:050bd277dfc3768b606fd4eae79dd58ceda67d8b0b3c565656a89ae34525d15e"}, - {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2637a60910b58f50f22379b6797466c3aa6ae28a6ab6404e09175ce4955b4e6a"}, - {file = "aiohttp-3.12.13-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e986067357550d1aaa21cfe9897fa19e680110551518a5a7cf44e6c5638cb8b5"}, - {file = "aiohttp-3.12.13-cp312-cp312-win32.whl", hash = "sha256:ac941a80aeea2aaae2875c9500861a3ba356f9ff17b9cb2dbfb5cbf91baaf5bf"}, - {file = "aiohttp-3.12.13-cp312-cp312-win_amd64.whl", hash = "sha256:671f41e6146a749b6c81cb7fd07f5a8356d46febdaaaf07b0e774ff04830461e"}, - {file = "aiohttp-3.12.13-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d4a18e61f271127465bdb0e8ff36e8f02ac4a32a80d8927aa52371e93cd87938"}, - {file = "aiohttp-3.12.13-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:532542cb48691179455fab429cdb0d558b5e5290b033b87478f2aa6af5d20ace"}, - {file = "aiohttp-3.12.13-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d7eea18b52f23c050ae9db5d01f3d264ab08f09e7356d6f68e3f3ac2de9dfabb"}, - {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad7c8e5c25f2a26842a7c239de3f7b6bfb92304593ef997c04ac49fb703ff4d7"}, - {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6af355b483e3fe9d7336d84539fef460120c2f6e50e06c658fe2907c69262d6b"}, - {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a95cf9f097498f35c88e3609f55bb47b28a5ef67f6888f4390b3d73e2bac6177"}, - {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8ed8c38a1c584fe99a475a8f60eefc0b682ea413a84c6ce769bb19a7ff1c5ef"}, - {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0b9170d5d800126b5bc89d3053a2363406d6e327afb6afaeda2d19ee8bb103"}, - {file = "aiohttp-3.12.13-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:372feeace612ef8eb41f05ae014a92121a512bd5067db8f25101dd88a8db11da"}, - {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a946d3702f7965d81f7af7ea8fb03bb33fe53d311df48a46eeca17e9e0beed2d"}, - {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a0c4725fae86555bbb1d4082129e21de7264f4ab14baf735278c974785cd2041"}, - {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b28ea2f708234f0a5c44eb6c7d9eb63a148ce3252ba0140d050b091b6e842d1"}, - {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d4f5becd2a5791829f79608c6f3dc745388162376f310eb9c142c985f9441cc1"}, - {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:60f2ce6b944e97649051d5f5cc0f439360690b73909230e107fd45a359d3e911"}, - {file = "aiohttp-3.12.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:69fc1909857401b67bf599c793f2183fbc4804717388b0b888f27f9929aa41f3"}, - {file = "aiohttp-3.12.13-cp313-cp313-win32.whl", hash = "sha256:7d7e68787a2046b0e44ba5587aa723ce05d711e3a3665b6b7545328ac8e3c0dd"}, - {file = "aiohttp-3.12.13-cp313-cp313-win_amd64.whl", hash = "sha256:5a178390ca90419bfd41419a809688c368e63c86bd725e1186dd97f6b89c2706"}, - {file = "aiohttp-3.12.13-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:36f6c973e003dc9b0bb4e8492a643641ea8ef0e97ff7aaa5c0f53d68839357b4"}, - {file = "aiohttp-3.12.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6cbfc73179bd67c229eb171e2e3745d2afd5c711ccd1e40a68b90427f282eab1"}, - {file = "aiohttp-3.12.13-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1e8b27b2d414f7e3205aa23bb4a692e935ef877e3a71f40d1884f6e04fd7fa74"}, - {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eabded0c2b2ef56243289112c48556c395d70150ce4220d9008e6b4b3dd15690"}, - {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:003038e83f1a3ff97409999995ec02fe3008a1d675478949643281141f54751d"}, - {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b6f46613031dbc92bdcaad9c4c22c7209236ec501f9c0c5f5f0b6a689bf50f3"}, - {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c332c6bb04650d59fb94ed96491f43812549a3ba6e7a16a218e612f99f04145e"}, - {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fea41a2c931fb582cb15dc86a3037329e7b941df52b487a9f8b5aa960153cbd"}, - {file = "aiohttp-3.12.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:846104f45d18fb390efd9b422b27d8f3cf8853f1218c537f36e71a385758c896"}, - {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d6c85ac7dd350f8da2520bac8205ce99df4435b399fa7f4dc4a70407073e390"}, - {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5a1ecce0ed281bec7da8550da052a6b89552db14d0a0a45554156f085a912f48"}, - {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:5304d74867028cca8f64f1cc1215eb365388033c5a691ea7aa6b0dc47412f495"}, - {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:64d1f24ee95a2d1e094a4cd7a9b7d34d08db1bbcb8aa9fb717046b0a884ac294"}, - {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:119c79922a7001ca6a9e253228eb39b793ea994fd2eccb79481c64b5f9d2a055"}, - {file = "aiohttp-3.12.13-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:bb18f00396d22e2f10cd8825d671d9f9a3ba968d708a559c02a627536b36d91c"}, - {file = "aiohttp-3.12.13-cp39-cp39-win32.whl", hash = "sha256:0022de47ef63fd06b065d430ac79c6b0bd24cdae7feaf0e8c6bac23b805a23a8"}, - {file = "aiohttp-3.12.13-cp39-cp39-win_amd64.whl", hash = "sha256:29e08111ccf81b2734ae03f1ad1cb03b9615e7d8f616764f22f71209c094f122"}, - {file = "aiohttp-3.12.13.tar.gz", hash = "sha256:47e2da578528264a12e4e3dd8dd72a7289e5f812758fe086473fab037a10fcce"}, + {file = "aiohttp-3.12.14-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:906d5075b5ba0dd1c66fcaaf60eb09926a9fef3ca92d912d2a0bbdbecf8b1248"}, + {file = "aiohttp-3.12.14-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c875bf6fc2fd1a572aba0e02ef4e7a63694778c5646cdbda346ee24e630d30fb"}, + {file = "aiohttp-3.12.14-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fbb284d15c6a45fab030740049d03c0ecd60edad9cd23b211d7e11d3be8d56fd"}, + {file = "aiohttp-3.12.14-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38e360381e02e1a05d36b223ecab7bc4a6e7b5ab15760022dc92589ee1d4238c"}, + {file = "aiohttp-3.12.14-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:aaf90137b5e5d84a53632ad95ebee5c9e3e7468f0aab92ba3f608adcb914fa95"}, + {file = "aiohttp-3.12.14-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e532a25e4a0a2685fa295a31acf65e027fbe2bea7a4b02cdfbbba8a064577663"}, + {file = "aiohttp-3.12.14-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eab9762c4d1b08ae04a6c77474e6136da722e34fdc0e6d6eab5ee93ac29f35d1"}, + {file = "aiohttp-3.12.14-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abe53c3812b2899889a7fca763cdfaeee725f5be68ea89905e4275476ffd7e61"}, + {file = "aiohttp-3.12.14-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5760909b7080aa2ec1d320baee90d03b21745573780a072b66ce633eb77a8656"}, + {file = "aiohttp-3.12.14-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:02fcd3f69051467bbaa7f84d7ec3267478c7df18d68b2e28279116e29d18d4f3"}, + {file = "aiohttp-3.12.14-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:4dcd1172cd6794884c33e504d3da3c35648b8be9bfa946942d353b939d5f1288"}, + {file = "aiohttp-3.12.14-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:224d0da41355b942b43ad08101b1b41ce633a654128ee07e36d75133443adcda"}, + {file = "aiohttp-3.12.14-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e387668724f4d734e865c1776d841ed75b300ee61059aca0b05bce67061dcacc"}, + {file = "aiohttp-3.12.14-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:dec9cde5b5a24171e0b0a4ca064b1414950904053fb77c707efd876a2da525d8"}, + {file = "aiohttp-3.12.14-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bbad68a2af4877cc103cd94af9160e45676fc6f0c14abb88e6e092b945c2c8e3"}, + {file = "aiohttp-3.12.14-cp310-cp310-win32.whl", hash = "sha256:ee580cb7c00bd857b3039ebca03c4448e84700dc1322f860cf7a500a6f62630c"}, + {file = "aiohttp-3.12.14-cp310-cp310-win_amd64.whl", hash = "sha256:cf4f05b8cea571e2ccc3ca744e35ead24992d90a72ca2cf7ab7a2efbac6716db"}, + {file = "aiohttp-3.12.14-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f4552ff7b18bcec18b60a90c6982049cdb9dac1dba48cf00b97934a06ce2e597"}, + {file = "aiohttp-3.12.14-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8283f42181ff6ccbcf25acaae4e8ab2ff7e92b3ca4a4ced73b2c12d8cd971393"}, + {file = "aiohttp-3.12.14-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:040afa180ea514495aaff7ad34ec3d27826eaa5d19812730fe9e529b04bb2179"}, + {file = "aiohttp-3.12.14-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b413c12f14c1149f0ffd890f4141a7471ba4b41234fe4fd4a0ff82b1dc299dbb"}, + {file = "aiohttp-3.12.14-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:1d6f607ce2e1a93315414e3d448b831238f1874b9968e1195b06efaa5c87e245"}, + {file = "aiohttp-3.12.14-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:565e70d03e924333004ed101599902bba09ebb14843c8ea39d657f037115201b"}, + {file = "aiohttp-3.12.14-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4699979560728b168d5ab63c668a093c9570af2c7a78ea24ca5212c6cdc2b641"}, + {file = "aiohttp-3.12.14-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad5fdf6af93ec6c99bf800eba3af9a43d8bfd66dce920ac905c817ef4a712afe"}, + {file = "aiohttp-3.12.14-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ac76627c0b7ee0e80e871bde0d376a057916cb008a8f3ffc889570a838f5cc7"}, + {file = "aiohttp-3.12.14-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:798204af1180885651b77bf03adc903743a86a39c7392c472891649610844635"}, + {file = "aiohttp-3.12.14-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:4f1205f97de92c37dd71cf2d5bcfb65fdaed3c255d246172cce729a8d849b4da"}, + {file = "aiohttp-3.12.14-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:76ae6f1dd041f85065d9df77c6bc9c9703da9b5c018479d20262acc3df97d419"}, + {file = "aiohttp-3.12.14-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a194ace7bc43ce765338ca2dfb5661489317db216ea7ea700b0332878b392cab"}, + {file = "aiohttp-3.12.14-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:16260e8e03744a6fe3fcb05259eeab8e08342c4c33decf96a9dad9f1187275d0"}, + {file = "aiohttp-3.12.14-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8c779e5ebbf0e2e15334ea404fcce54009dc069210164a244d2eac8352a44b28"}, + {file = "aiohttp-3.12.14-cp311-cp311-win32.whl", hash = "sha256:a289f50bf1bd5be227376c067927f78079a7bdeccf8daa6a9e65c38bae14324b"}, + {file = "aiohttp-3.12.14-cp311-cp311-win_amd64.whl", hash = "sha256:0b8a69acaf06b17e9c54151a6c956339cf46db4ff72b3ac28516d0f7068f4ced"}, + {file = "aiohttp-3.12.14-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a0ecbb32fc3e69bc25efcda7d28d38e987d007096cbbeed04f14a6662d0eee22"}, + {file = "aiohttp-3.12.14-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0400f0ca9bb3e0b02f6466421f253797f6384e9845820c8b05e976398ac1d81a"}, + {file = "aiohttp-3.12.14-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a56809fed4c8a830b5cae18454b7464e1529dbf66f71c4772e3cfa9cbec0a1ff"}, + {file = "aiohttp-3.12.14-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27f2e373276e4755691a963e5d11756d093e346119f0627c2d6518208483fb6d"}, + {file = "aiohttp-3.12.14-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ca39e433630e9a16281125ef57ece6817afd1d54c9f1bf32e901f38f16035869"}, + {file = "aiohttp-3.12.14-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c748b3f8b14c77720132b2510a7d9907a03c20ba80f469e58d5dfd90c079a1c"}, + {file = "aiohttp-3.12.14-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0a568abe1b15ce69d4cc37e23020720423f0728e3cb1f9bcd3f53420ec3bfe7"}, + {file = 
"aiohttp-3.12.14-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9888e60c2c54eaf56704b17feb558c7ed6b7439bca1e07d4818ab878f2083660"}, + {file = "aiohttp-3.12.14-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3006a1dc579b9156de01e7916d38c63dc1ea0679b14627a37edf6151bc530088"}, + {file = "aiohttp-3.12.14-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:aa8ec5c15ab80e5501a26719eb48a55f3c567da45c6ea5bb78c52c036b2655c7"}, + {file = "aiohttp-3.12.14-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:39b94e50959aa07844c7fe2206b9f75d63cc3ad1c648aaa755aa257f6f2498a9"}, + {file = "aiohttp-3.12.14-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:04c11907492f416dad9885d503fbfc5dcb6768d90cad8639a771922d584609d3"}, + {file = "aiohttp-3.12.14-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:88167bd9ab69bb46cee91bd9761db6dfd45b6e76a0438c7e884c3f8160ff21eb"}, + {file = "aiohttp-3.12.14-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:791504763f25e8f9f251e4688195e8b455f8820274320204f7eafc467e609425"}, + {file = "aiohttp-3.12.14-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2785b112346e435dd3a1a67f67713a3fe692d288542f1347ad255683f066d8e0"}, + {file = "aiohttp-3.12.14-cp312-cp312-win32.whl", hash = "sha256:15f5f4792c9c999a31d8decf444e79fcfd98497bf98e94284bf390a7bb8c1729"}, + {file = "aiohttp-3.12.14-cp312-cp312-win_amd64.whl", hash = "sha256:3b66e1a182879f579b105a80d5c4bd448b91a57e8933564bf41665064796a338"}, + {file = "aiohttp-3.12.14-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:3143a7893d94dc82bc409f7308bc10d60285a3cd831a68faf1aa0836c5c3c767"}, + {file = "aiohttp-3.12.14-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3d62ac3d506cef54b355bd34c2a7c230eb693880001dfcda0bf88b38f5d7af7e"}, + {file = "aiohttp-3.12.14-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:48e43e075c6a438937c4de48ec30fa8ad8e6dfef122a038847456bfe7b947b63"}, + {file = "aiohttp-3.12.14-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:077b4488411a9724cecc436cbc8c133e0d61e694995b8de51aaf351c7578949d"}, + {file = "aiohttp-3.12.14-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d8c35632575653f297dcbc9546305b2c1133391089ab925a6a3706dfa775ccab"}, + {file = "aiohttp-3.12.14-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b8ce87963f0035c6834b28f061df90cf525ff7c9b6283a8ac23acee6502afd4"}, + {file = "aiohttp-3.12.14-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0a2cf66e32a2563bb0766eb24eae7e9a269ac0dc48db0aae90b575dc9583026"}, + {file = "aiohttp-3.12.14-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdea089caf6d5cde975084a884c72d901e36ef9c2fd972c9f51efbbc64e96fbd"}, + {file = "aiohttp-3.12.14-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a7865f27db67d49e81d463da64a59365ebd6b826e0e4847aa111056dcb9dc88"}, + {file = "aiohttp-3.12.14-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0ab5b38a6a39781d77713ad930cb5e7feea6f253de656a5f9f281a8f5931b086"}, + {file = "aiohttp-3.12.14-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:9b3b15acee5c17e8848d90a4ebc27853f37077ba6aec4d8cb4dbbea56d156933"}, + {file = "aiohttp-3.12.14-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e4c972b0bdaac167c1e53e16a16101b17c6d0ed7eac178e653a07b9f7fad7151"}, + {file = 
"aiohttp-3.12.14-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7442488b0039257a3bdbc55f7209587911f143fca11df9869578db6c26feeeb8"}, + {file = "aiohttp-3.12.14-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f68d3067eecb64c5e9bab4a26aa11bd676f4c70eea9ef6536b0a4e490639add3"}, + {file = "aiohttp-3.12.14-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f88d3704c8b3d598a08ad17d06006cb1ca52a1182291f04979e305c8be6c9758"}, + {file = "aiohttp-3.12.14-cp313-cp313-win32.whl", hash = "sha256:a3c99ab19c7bf375c4ae3debd91ca5d394b98b6089a03231d4c580ef3c2ae4c5"}, + {file = "aiohttp-3.12.14-cp313-cp313-win_amd64.whl", hash = "sha256:3f8aad695e12edc9d571f878c62bedc91adf30c760c8632f09663e5f564f4baa"}, + {file = "aiohttp-3.12.14-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b8cc6b05e94d837bcd71c6531e2344e1ff0fb87abe4ad78a9261d67ef5d83eae"}, + {file = "aiohttp-3.12.14-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d1dcb015ac6a3b8facd3677597edd5ff39d11d937456702f0bb2b762e390a21b"}, + {file = "aiohttp-3.12.14-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3779ed96105cd70ee5e85ca4f457adbce3d9ff33ec3d0ebcdf6c5727f26b21b3"}, + {file = "aiohttp-3.12.14-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:717a0680729b4ebd7569c1dcd718c46b09b360745fd8eb12317abc74b14d14d0"}, + {file = "aiohttp-3.12.14-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b5dd3a2ef7c7e968dbbac8f5574ebeac4d2b813b247e8cec28174a2ba3627170"}, + {file = "aiohttp-3.12.14-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4710f77598c0092239bc12c1fcc278a444e16c7032d91babf5abbf7166463f7b"}, + {file = "aiohttp-3.12.14-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f3e9f75ae842a6c22a195d4a127263dbf87cbab729829e0bd7857fb1672400b2"}, + {file = "aiohttp-3.12.14-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f9c8d55d6802086edd188e3a7d85a77787e50d56ce3eb4757a3205fa4657922"}, + {file = "aiohttp-3.12.14-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:79b29053ff3ad307880d94562cca80693c62062a098a5776ea8ef5ef4b28d140"}, + {file = "aiohttp-3.12.14-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:23e1332fff36bebd3183db0c7a547a1da9d3b4091509f6d818e098855f2f27d3"}, + {file = "aiohttp-3.12.14-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:a564188ce831fd110ea76bcc97085dd6c625b427db3f1dbb14ca4baa1447dcbc"}, + {file = "aiohttp-3.12.14-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:a7a1b4302f70bb3ec40ca86de82def532c97a80db49cac6a6700af0de41af5ee"}, + {file = "aiohttp-3.12.14-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:1b07ccef62950a2519f9bfc1e5b294de5dd84329f444ca0b329605ea787a3de5"}, + {file = "aiohttp-3.12.14-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:938bd3ca6259e7e48b38d84f753d548bd863e0c222ed6ee6ace3fd6752768a84"}, + {file = "aiohttp-3.12.14-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8bc784302b6b9f163b54c4e93d7a6f09563bd01ff2b841b29ed3ac126e5040bf"}, + {file = "aiohttp-3.12.14-cp39-cp39-win32.whl", hash = "sha256:a3416f95961dd7d5393ecff99e3f41dc990fb72eda86c11f2a60308ac6dcd7a0"}, + {file = "aiohttp-3.12.14-cp39-cp39-win_amd64.whl", hash = "sha256:196858b8820d7f60578f8b47e5669b3195c21d8ab261e39b1d705346458f445f"}, + {file = "aiohttp-3.12.14.tar.gz", hash = "sha256:6e06e120e34d93100de448fd941522e11dafa78ef1a893c179901b7d66aa29f2"}, ] [package.dependencies] aiohappyeyeballs = ">=2.5.0" -aiosignal = 
">=1.1.2" +aiosignal = ">=1.4.0" attrs = ">=17.3.0" frozenlist = ">=1.1.1" multidict = ">=4.5,<7.0" @@ -166,14 +166,14 @@ speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (> [[package]] name = "aiosignal" -version = "1.3.2" +version = "1.4.0" description = "aiosignal: a list of registered asynchronous callbacks" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, - {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, + {file = "aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e"}, + {file = "aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7"}, ] [package.dependencies] @@ -327,18 +327,19 @@ dev = ["backports.zoneinfo ; python_version < \"3.9\"", "freezegun (>=1.0,<2.0)" [[package]] name = "backrefs" -version = "5.8" +version = "5.9" description = "A wrapper around re and regex that adds additional back references." optional = false python-versions = ">=3.9" groups = ["docs"] files = [ - {file = "backrefs-5.8-py310-none-any.whl", hash = "sha256:c67f6638a34a5b8730812f5101376f9d41dc38c43f1fdc35cb54700f6ed4465d"}, - {file = "backrefs-5.8-py311-none-any.whl", hash = "sha256:2e1c15e4af0e12e45c8701bd5da0902d326b2e200cafcd25e49d9f06d44bb61b"}, - {file = "backrefs-5.8-py312-none-any.whl", hash = "sha256:bbef7169a33811080d67cdf1538c8289f76f0942ff971222a16034da88a73486"}, - {file = "backrefs-5.8-py313-none-any.whl", hash = "sha256:e3a63b073867dbefd0536425f43db618578528e3896fb77be7141328642a1585"}, - {file = "backrefs-5.8-py39-none-any.whl", hash = "sha256:a66851e4533fb5b371aa0628e1fee1af05135616b86140c9d787a2ffdf4b8fdc"}, - {file = "backrefs-5.8.tar.gz", hash = "sha256:2cab642a205ce966af3dd4b38ee36009b31fa9502a35fd61d59ccc116e40a6bd"}, + {file = "backrefs-5.9-py310-none-any.whl", hash = "sha256:db8e8ba0e9de81fcd635f440deab5ae5f2591b54ac1ebe0550a2ca063488cd9f"}, + {file = "backrefs-5.9-py311-none-any.whl", hash = "sha256:6907635edebbe9b2dc3de3a2befff44d74f30a4562adbb8b36f21252ea19c5cf"}, + {file = "backrefs-5.9-py312-none-any.whl", hash = "sha256:7fdf9771f63e6028d7fee7e0c497c81abda597ea45d6b8f89e8ad76994f5befa"}, + {file = "backrefs-5.9-py313-none-any.whl", hash = "sha256:cc37b19fa219e93ff825ed1fed8879e47b4d89aa7a1884860e2db64ccd7c676b"}, + {file = "backrefs-5.9-py314-none-any.whl", hash = "sha256:df5e169836cc8acb5e440ebae9aad4bf9d15e226d3bad049cf3f6a5c20cc8dc9"}, + {file = "backrefs-5.9-py39-none-any.whl", hash = "sha256:f48ee18f6252b8f5777a22a00a09a85de0ca931658f1dd96d4406a34f3748c60"}, + {file = "backrefs-5.9.tar.gz", hash = "sha256:808548cb708d66b82ee231f962cb36faaf4f2baab032f2fbb783e9c2fdddaa59"}, ] [package.extras] @@ -447,14 +448,14 @@ test = ["flake8", "isort", "pytest"] [[package]] name = "certifi" -version = "2025.6.15" +version = "2025.7.14" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.7" groups = ["main", "dev", "docs"] files = [ - {file = "certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057"}, - {file = "certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b"}, + {file = "certifi-2025.7.14-py3-none-any.whl", hash = "sha256:6b31f564a415d79ee77df69d757bb49a5bb53bd9f756cbbe24394ffd6fc1f4b2"}, + {file = "certifi-2025.7.14.tar.gz", hash = "sha256:8ea99dbdfaaf2ba2f9bac77b9249ef62ec5218e7c2b2e903378ed5fccf765995"}, ] [[package]] @@ -697,79 +698,79 @@ files = [ [[package]] name = "coverage" -version = "7.9.1" +version = "7.9.2" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.9" groups = ["test"] files = [ - {file = "coverage-7.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cc94d7c5e8423920787c33d811c0be67b7be83c705f001f7180c7b186dcf10ca"}, - {file = "coverage-7.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16aa0830d0c08a2c40c264cef801db8bc4fc0e1892782e45bcacbd5889270509"}, - {file = "coverage-7.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf95981b126f23db63e9dbe4cf65bd71f9a6305696fa5e2262693bc4e2183f5b"}, - {file = "coverage-7.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f05031cf21699785cd47cb7485f67df619e7bcdae38e0fde40d23d3d0210d3c3"}, - {file = "coverage-7.9.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb4fbcab8764dc072cb651a4bcda4d11fb5658a1d8d68842a862a6610bd8cfa3"}, - {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0f16649a7330ec307942ed27d06ee7e7a38417144620bb3d6e9a18ded8a2d3e5"}, - {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cea0a27a89e6432705fffc178064503508e3c0184b4f061700e771a09de58187"}, - {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e980b53a959fa53b6f05343afbd1e6f44a23ed6c23c4b4c56c6662bbb40c82ce"}, - {file = "coverage-7.9.1-cp310-cp310-win32.whl", hash = "sha256:70760b4c5560be6ca70d11f8988ee6542b003f982b32f83d5ac0b72476607b70"}, - {file = "coverage-7.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:a66e8f628b71f78c0e0342003d53b53101ba4e00ea8dabb799d9dba0abbbcebe"}, - {file = "coverage-7.9.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:95c765060e65c692da2d2f51a9499c5e9f5cf5453aeaf1420e3fc847cc060582"}, - {file = "coverage-7.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ba383dc6afd5ec5b7a0d0c23d38895db0e15bcba7fb0fa8901f245267ac30d86"}, - {file = "coverage-7.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37ae0383f13cbdcf1e5e7014489b0d71cc0106458878ccde52e8a12ced4298ed"}, - {file = "coverage-7.9.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69aa417a030bf11ec46149636314c24c8d60fadb12fc0ee8f10fda0d918c879d"}, - {file = "coverage-7.9.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a4be2a28656afe279b34d4f91c3e26eccf2f85500d4a4ff0b1f8b54bf807338"}, - {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:382e7ddd5289f140259b610e5f5c58f713d025cb2f66d0eb17e68d0a94278875"}, - {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:e5532482344186c543c37bfad0ee6069e8ae4fc38d073b8bc836fc8f03c9e250"}, - {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a39d18b3f50cc121d0ce3838d32d58bd1d15dab89c910358ebefc3665712256c"}, - {file = "coverage-7.9.1-cp311-cp311-win32.whl", hash = "sha256:dd24bd8d77c98557880def750782df77ab2b6885a18483dc8588792247174b32"}, - {file = "coverage-7.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:6b55ad10a35a21b8015eabddc9ba31eb590f54adc9cd39bcf09ff5349fd52125"}, - {file = "coverage-7.9.1-cp311-cp311-win_arm64.whl", hash = "sha256:6ad935f0016be24c0e97fc8c40c465f9c4b85cbbe6eac48934c0dc4d2568321e"}, - {file = "coverage-7.9.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a8de12b4b87c20de895f10567639c0797b621b22897b0af3ce4b4e204a743626"}, - {file = "coverage-7.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5add197315a054e92cee1b5f686a2bcba60c4c3e66ee3de77ace6c867bdee7cb"}, - {file = "coverage-7.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:600a1d4106fe66f41e5d0136dfbc68fe7200a5cbe85610ddf094f8f22e1b0300"}, - {file = "coverage-7.9.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a876e4c3e5a2a1715a6608906aa5a2e0475b9c0f68343c2ada98110512ab1d8"}, - {file = "coverage-7.9.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81f34346dd63010453922c8e628a52ea2d2ccd73cb2487f7700ac531b247c8a5"}, - {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:888f8eee13f2377ce86d44f338968eedec3291876b0b8a7289247ba52cb984cd"}, - {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9969ef1e69b8c8e1e70d591f91bbc37fc9a3621e447525d1602801a24ceda898"}, - {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:60c458224331ee3f1a5b472773e4a085cc27a86a0b48205409d364272d67140d"}, - {file = "coverage-7.9.1-cp312-cp312-win32.whl", hash = "sha256:5f646a99a8c2b3ff4c6a6e081f78fad0dde275cd59f8f49dc4eab2e394332e74"}, - {file = "coverage-7.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:30f445f85c353090b83e552dcbbdad3ec84c7967e108c3ae54556ca69955563e"}, - {file = "coverage-7.9.1-cp312-cp312-win_arm64.whl", hash = "sha256:af41da5dca398d3474129c58cb2b106a5d93bbb196be0d307ac82311ca234342"}, - {file = "coverage-7.9.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:31324f18d5969feef7344a932c32428a2d1a3e50b15a6404e97cba1cc9b2c631"}, - {file = "coverage-7.9.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0c804506d624e8a20fb3108764c52e0eef664e29d21692afa375e0dd98dc384f"}, - {file = "coverage-7.9.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef64c27bc40189f36fcc50c3fb8f16ccda73b6a0b80d9bd6e6ce4cffcd810bbd"}, - {file = "coverage-7.9.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4fe2348cc6ec372e25adec0219ee2334a68d2f5222e0cba9c0d613394e12d86"}, - {file = "coverage-7.9.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34ed2186fe52fcc24d4561041979a0dec69adae7bce2ae8d1c49eace13e55c43"}, - {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:25308bd3d00d5eedd5ae7d4357161f4df743e3c0240fa773ee1b0f75e6c7c0f1"}, - {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:73e9439310f65d55a5a1e0564b48e34f5369bee943d72c88378f2d576f5a5751"}, - {file = 
"coverage-7.9.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37ab6be0859141b53aa89412a82454b482c81cf750de4f29223d52268a86de67"}, - {file = "coverage-7.9.1-cp313-cp313-win32.whl", hash = "sha256:64bdd969456e2d02a8b08aa047a92d269c7ac1f47e0c977675d550c9a0863643"}, - {file = "coverage-7.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:be9e3f68ca9edb897c2184ad0eee815c635565dbe7a0e7e814dc1f7cbab92c0a"}, - {file = "coverage-7.9.1-cp313-cp313-win_arm64.whl", hash = "sha256:1c503289ffef1d5105d91bbb4d62cbe4b14bec4d13ca225f9c73cde9bb46207d"}, - {file = "coverage-7.9.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0b3496922cb5f4215bf5caaef4cf12364a26b0be82e9ed6d050f3352cf2d7ef0"}, - {file = "coverage-7.9.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9565c3ab1c93310569ec0d86b017f128f027cab0b622b7af288696d7ed43a16d"}, - {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2241ad5dbf79ae1d9c08fe52b36d03ca122fb9ac6bca0f34439e99f8327ac89f"}, - {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bb5838701ca68b10ebc0937dbd0eb81974bac54447c55cd58dea5bca8451029"}, - {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b30a25f814591a8c0c5372c11ac8967f669b97444c47fd794926e175c4047ece"}, - {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2d04b16a6062516df97969f1ae7efd0de9c31eb6ebdceaa0d213b21c0ca1a683"}, - {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7931b9e249edefb07cd6ae10c702788546341d5fe44db5b6108a25da4dca513f"}, - {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52e92b01041151bf607ee858e5a56c62d4b70f4dac85b8c8cb7fb8a351ab2c10"}, - {file = "coverage-7.9.1-cp313-cp313t-win32.whl", hash = "sha256:684e2110ed84fd1ca5f40e89aa44adf1729dc85444004111aa01866507adf363"}, - {file = "coverage-7.9.1-cp313-cp313t-win_amd64.whl", hash = "sha256:437c576979e4db840539674e68c84b3cda82bc824dd138d56bead1435f1cb5d7"}, - {file = "coverage-7.9.1-cp313-cp313t-win_arm64.whl", hash = "sha256:18a0912944d70aaf5f399e350445738a1a20b50fbea788f640751c2ed9208b6c"}, - {file = "coverage-7.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6f424507f57878e424d9a95dc4ead3fbdd72fd201e404e861e465f28ea469951"}, - {file = "coverage-7.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:535fde4001b2783ac80865d90e7cc7798b6b126f4cd8a8c54acfe76804e54e58"}, - {file = "coverage-7.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02532fd3290bb8fa6bec876520842428e2a6ed6c27014eca81b031c2d30e3f71"}, - {file = "coverage-7.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56f5eb308b17bca3bbff810f55ee26d51926d9f89ba92707ee41d3c061257e55"}, - {file = "coverage-7.9.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfa447506c1a52271f1b0de3f42ea0fa14676052549095e378d5bff1c505ff7b"}, - {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9ca8e220006966b4a7b68e8984a6aee645a0384b0769e829ba60281fe61ec4f7"}, - {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:49f1d0788ba5b7ba65933f3a18864117c6506619f5ca80326b478f72acf3f385"}, - {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:68cd53aec6f45b8e4724c0950ce86eacb775c6be01ce6e3669fe4f3a21e768ed"}, - {file = "coverage-7.9.1-cp39-cp39-win32.whl", hash = "sha256:95335095b6c7b1cc14c3f3f17d5452ce677e8490d101698562b2ffcacc304c8d"}, - {file = "coverage-7.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:e1b5191d1648acc439b24721caab2fd0c86679d8549ed2c84d5a7ec1bedcc244"}, - {file = "coverage-7.9.1-pp39.pp310.pp311-none-any.whl", hash = "sha256:db0f04118d1db74db6c9e1cb1898532c7dcc220f1d2718f058601f7c3f499514"}, - {file = "coverage-7.9.1-py3-none-any.whl", hash = "sha256:66b974b145aa189516b6bf2d8423e888b742517d37872f6ee4c5be0073bd9a3c"}, - {file = "coverage-7.9.1.tar.gz", hash = "sha256:6cf43c78c4282708a28e466316935ec7489a9c487518a77fa68f716c67909cec"}, + {file = "coverage-7.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:66283a192a14a3854b2e7f3418d7db05cdf411012ab7ff5db98ff3b181e1f912"}, + {file = "coverage-7.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4e01d138540ef34fcf35c1aa24d06c3de2a4cffa349e29a10056544f35cca15f"}, + {file = "coverage-7.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f22627c1fe2745ee98d3ab87679ca73a97e75ca75eb5faee48660d060875465f"}, + {file = "coverage-7.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b1c2d8363247b46bd51f393f86c94096e64a1cf6906803fa8d5a9d03784bdbf"}, + {file = "coverage-7.9.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c10c882b114faf82dbd33e876d0cbd5e1d1ebc0d2a74ceef642c6152f3f4d547"}, + {file = "coverage-7.9.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:de3c0378bdf7066c3988d66cd5232d161e933b87103b014ab1b0b4676098fa45"}, + {file = "coverage-7.9.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1e2f097eae0e5991e7623958a24ced3282676c93c013dde41399ff63e230fcf2"}, + {file = "coverage-7.9.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:28dc1f67e83a14e7079b6cea4d314bc8b24d1aed42d3582ff89c0295f09b181e"}, + {file = "coverage-7.9.2-cp310-cp310-win32.whl", hash = "sha256:bf7d773da6af9e10dbddacbf4e5cab13d06d0ed93561d44dae0188a42c65be7e"}, + {file = "coverage-7.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:0c0378ba787681ab1897f7c89b415bd56b0b2d9a47e5a3d8dc0ea55aac118d6c"}, + {file = "coverage-7.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a7a56a2964a9687b6aba5b5ced6971af308ef6f79a91043c05dd4ee3ebc3e9ba"}, + {file = "coverage-7.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123d589f32c11d9be7fe2e66d823a236fe759b0096f5db3fb1b75b2fa414a4fa"}, + {file = "coverage-7.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:333b2e0ca576a7dbd66e85ab402e35c03b0b22f525eed82681c4b866e2e2653a"}, + {file = "coverage-7.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:326802760da234baf9f2f85a39e4a4b5861b94f6c8d95251f699e4f73b1835dc"}, + {file = "coverage-7.9.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19e7be4cfec248df38ce40968c95d3952fbffd57b400d4b9bb580f28179556d2"}, + {file = "coverage-7.9.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0b4a4cb73b9f2b891c1788711408ef9707666501ba23684387277ededab1097c"}, + {file = "coverage-7.9.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:2c8937fa16c8c9fbbd9f118588756e7bcdc7e16a470766a9aef912dd3f117dbd"}, + {file = "coverage-7.9.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:42da2280c4d30c57a9b578bafd1d4494fa6c056d4c419d9689e66d775539be74"}, + {file = "coverage-7.9.2-cp311-cp311-win32.whl", hash = "sha256:14fa8d3da147f5fdf9d298cacc18791818f3f1a9f542c8958b80c228320e90c6"}, + {file = "coverage-7.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:549cab4892fc82004f9739963163fd3aac7a7b0df430669b75b86d293d2df2a7"}, + {file = "coverage-7.9.2-cp311-cp311-win_arm64.whl", hash = "sha256:c2667a2b913e307f06aa4e5677f01a9746cd08e4b35e14ebcde6420a9ebb4c62"}, + {file = "coverage-7.9.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ae9eb07f1cfacd9cfe8eaee6f4ff4b8a289a668c39c165cd0c8548484920ffc0"}, + {file = "coverage-7.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9ce85551f9a1119f02adc46d3014b5ee3f765deac166acf20dbb851ceb79b6f3"}, + {file = "coverage-7.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8f6389ac977c5fb322e0e38885fbbf901743f79d47f50db706e7644dcdcb6e1"}, + {file = "coverage-7.9.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff0d9eae8cdfcd58fe7893b88993723583a6ce4dfbfd9f29e001922544f95615"}, + {file = "coverage-7.9.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fae939811e14e53ed8a9818dad51d434a41ee09df9305663735f2e2d2d7d959b"}, + {file = "coverage-7.9.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:31991156251ec202c798501e0a42bbdf2169dcb0f137b1f5c0f4267f3fc68ef9"}, + {file = "coverage-7.9.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d0d67963f9cbfc7c7f96d4ac74ed60ecbebd2ea6eeb51887af0f8dce205e545f"}, + {file = "coverage-7.9.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:49b752a2858b10580969ec6af6f090a9a440a64a301ac1528d7ca5f7ed497f4d"}, + {file = "coverage-7.9.2-cp312-cp312-win32.whl", hash = "sha256:88d7598b8ee130f32f8a43198ee02edd16d7f77692fa056cb779616bbea1b355"}, + {file = "coverage-7.9.2-cp312-cp312-win_amd64.whl", hash = "sha256:9dfb070f830739ee49d7c83e4941cc767e503e4394fdecb3b54bfdac1d7662c0"}, + {file = "coverage-7.9.2-cp312-cp312-win_arm64.whl", hash = "sha256:4e2c058aef613e79df00e86b6d42a641c877211384ce5bd07585ed7ba71ab31b"}, + {file = "coverage-7.9.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:985abe7f242e0d7bba228ab01070fde1d6c8fa12f142e43debe9ed1dde686038"}, + {file = "coverage-7.9.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82c3939264a76d44fde7f213924021ed31f55ef28111a19649fec90c0f109e6d"}, + {file = "coverage-7.9.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae5d563e970dbe04382f736ec214ef48103d1b875967c89d83c6e3f21706d5b3"}, + {file = "coverage-7.9.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdd612e59baed2a93c8843c9a7cb902260f181370f1d772f4842987535071d14"}, + {file = "coverage-7.9.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:256ea87cb2a1ed992bcdfc349d8042dcea1b80436f4ddf6e246d6bee4b5d73b6"}, + {file = "coverage-7.9.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f44ae036b63c8ea432f610534a2668b0c3aee810e7037ab9d8ff6883de480f5b"}, + {file = "coverage-7.9.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:82d76ad87c932935417a19b10cfe7abb15fd3f923cfe47dbdaa74ef4e503752d"}, + {file = "coverage-7.9.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:619317bb86de4193debc712b9e59d5cffd91dc1d178627ab2a77b9870deb2868"}, + {file = 
"coverage-7.9.2-cp313-cp313-win32.whl", hash = "sha256:0a07757de9feb1dfafd16ab651e0f628fd7ce551604d1bf23e47e1ddca93f08a"}, + {file = "coverage-7.9.2-cp313-cp313-win_amd64.whl", hash = "sha256:115db3d1f4d3f35f5bb021e270edd85011934ff97c8797216b62f461dd69374b"}, + {file = "coverage-7.9.2-cp313-cp313-win_arm64.whl", hash = "sha256:48f82f889c80af8b2a7bb6e158d95a3fbec6a3453a1004d04e4f3b5945a02694"}, + {file = "coverage-7.9.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:55a28954545f9d2f96870b40f6c3386a59ba8ed50caf2d949676dac3ecab99f5"}, + {file = "coverage-7.9.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:cdef6504637731a63c133bb2e6f0f0214e2748495ec15fe42d1e219d1b133f0b"}, + {file = "coverage-7.9.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bcd5ebe66c7a97273d5d2ddd4ad0ed2e706b39630ed4b53e713d360626c3dbb3"}, + {file = "coverage-7.9.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9303aed20872d7a3c9cb39c5d2b9bdbe44e3a9a1aecb52920f7e7495410dfab8"}, + {file = "coverage-7.9.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc18ea9e417a04d1920a9a76fe9ebd2f43ca505b81994598482f938d5c315f46"}, + {file = "coverage-7.9.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6406cff19880aaaadc932152242523e892faff224da29e241ce2fca329866584"}, + {file = "coverage-7.9.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2d0d4f6ecdf37fcc19c88fec3e2277d5dee740fb51ffdd69b9579b8c31e4232e"}, + {file = "coverage-7.9.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c33624f50cf8de418ab2b4d6ca9eda96dc45b2c4231336bac91454520e8d1fac"}, + {file = "coverage-7.9.2-cp313-cp313t-win32.whl", hash = "sha256:1df6b76e737c6a92210eebcb2390af59a141f9e9430210595251fbaf02d46926"}, + {file = "coverage-7.9.2-cp313-cp313t-win_amd64.whl", hash = "sha256:f5fd54310b92741ebe00d9c0d1d7b2b27463952c022da6d47c175d246a98d1bd"}, + {file = "coverage-7.9.2-cp313-cp313t-win_arm64.whl", hash = "sha256:c48c2375287108c887ee87d13b4070a381c6537d30e8487b24ec721bf2a781cb"}, + {file = "coverage-7.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ddc39510ac922a5c4c27849b739f875d3e1d9e590d1e7b64c98dadf037a16cce"}, + {file = "coverage-7.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a535c0c7364acd55229749c2b3e5eebf141865de3a8f697076a3291985f02d30"}, + {file = "coverage-7.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df0f9ef28e0f20c767ccdccfc5ae5f83a6f4a2fbdfbcbcc8487a8a78771168c8"}, + {file = "coverage-7.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f3da12e0ccbcb348969221d29441ac714bbddc4d74e13923d3d5a7a0bebef7a"}, + {file = "coverage-7.9.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a17eaf46f56ae0f870f14a3cbc2e4632fe3771eab7f687eda1ee59b73d09fe4"}, + {file = "coverage-7.9.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:669135a9d25df55d1ed56a11bf555f37c922cf08d80799d4f65d77d7d6123fcf"}, + {file = "coverage-7.9.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:9d3a700304d01a627df9db4322dc082a0ce1e8fc74ac238e2af39ced4c083193"}, + {file = "coverage-7.9.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:71ae8b53855644a0b1579d4041304ddc9995c7b21c8a1f16753c4d8903b4dfed"}, + {file = "coverage-7.9.2-cp39-cp39-win32.whl", hash = 
"sha256:dd7a57b33b5cf27acb491e890720af45db05589a80c1ffc798462a765be6d4d7"}, + {file = "coverage-7.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:f65bb452e579d5540c8b37ec105dd54d8b9307b07bcaa186818c104ffda22441"}, + {file = "coverage-7.9.2-pp39.pp310.pp311-none-any.whl", hash = "sha256:8a1166db2fb62473285bcb092f586e081e92656c7dfa8e9f62b4d39d7e6b5050"}, + {file = "coverage-7.9.2-py3-none-any.whl", hash = "sha256:e425cd5b00f6fc0ed7cdbd766c70be8baab4b7839e4d4fe5fac48581dd968ea4"}, + {file = "coverage-7.9.2.tar.gz", hash = "sha256:997024fa51e3290264ffd7492ec97d0690293ccd2b45a6cd7d82d945a4a80c8b"}, ] [package.extras] @@ -789,49 +790,49 @@ files = [ [[package]] name = "cryptography" -version = "45.0.4" +version = "45.0.5" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false python-versions = "!=3.9.0,!=3.9.1,>=3.7" groups = ["main", "dev"] files = [ - {file = "cryptography-45.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:425a9a6ac2823ee6e46a76a21a4e8342d8fa5c01e08b823c1f19a8b74f096069"}, - {file = "cryptography-45.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:680806cf63baa0039b920f4976f5f31b10e772de42f16310a6839d9f21a26b0d"}, - {file = "cryptography-45.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4ca0f52170e821bc8da6fc0cc565b7bb8ff8d90d36b5e9fdd68e8a86bdf72036"}, - {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f3fe7a5ae34d5a414957cc7f457e2b92076e72938423ac64d215722f6cf49a9e"}, - {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:25eb4d4d3e54595dc8adebc6bbd5623588991d86591a78c2548ffb64797341e2"}, - {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce1678a2ccbe696cf3af15a75bb72ee008d7ff183c9228592ede9db467e64f1b"}, - {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:49fe9155ab32721b9122975e168a6760d8ce4cffe423bcd7ca269ba41b5dfac1"}, - {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:2882338b2a6e0bd337052e8b9007ced85c637da19ef9ecaf437744495c8c2999"}, - {file = "cryptography-45.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:23b9c3ea30c3ed4db59e7b9619272e94891f8a3a5591d0b656a7582631ccf750"}, - {file = "cryptography-45.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b0a97c927497e3bc36b33987abb99bf17a9a175a19af38a892dc4bbb844d7ee2"}, - {file = "cryptography-45.0.4-cp311-abi3-win32.whl", hash = "sha256:e00a6c10a5c53979d6242f123c0a97cff9f3abed7f064fc412c36dc521b5f257"}, - {file = "cryptography-45.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:817ee05c6c9f7a69a16200f0c90ab26d23a87701e2a284bd15156783e46dbcc8"}, - {file = "cryptography-45.0.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:964bcc28d867e0f5491a564b7debb3ffdd8717928d315d12e0d7defa9e43b723"}, - {file = "cryptography-45.0.4-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6a5bf57554e80f75a7db3d4b1dacaa2764611ae166ab42ea9a72bcdb5d577637"}, - {file = "cryptography-45.0.4-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:46cf7088bf91bdc9b26f9c55636492c1cce3e7aaf8041bbf0243f5e5325cfb2d"}, - {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7bedbe4cc930fa4b100fc845ea1ea5788fcd7ae9562e669989c11618ae8d76ee"}, - {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", 
hash = "sha256:eaa3e28ea2235b33220b949c5a0d6cf79baa80eab2eb5607ca8ab7525331b9ff"}, - {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:7ef2dde4fa9408475038fc9aadfc1fb2676b174e68356359632e980c661ec8f6"}, - {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:6a3511ae33f09094185d111160fd192c67aa0a2a8d19b54d36e4c78f651dc5ad"}, - {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:06509dc70dd71fa56eaa138336244e2fbaf2ac164fc9b5e66828fccfd2b680d6"}, - {file = "cryptography-45.0.4-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:5f31e6b0a5a253f6aa49be67279be4a7e5a4ef259a9f33c69f7d1b1191939872"}, - {file = "cryptography-45.0.4-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:944e9ccf67a9594137f942d5b52c8d238b1b4e46c7a0c2891b7ae6e01e7c80a4"}, - {file = "cryptography-45.0.4-cp37-abi3-win32.whl", hash = "sha256:c22fe01e53dc65edd1945a2e6f0015e887f84ced233acecb64b4daadb32f5c97"}, - {file = "cryptography-45.0.4-cp37-abi3-win_amd64.whl", hash = "sha256:627ba1bc94f6adf0b0a2e35d87020285ead22d9f648c7e75bb64f367375f3b22"}, - {file = "cryptography-45.0.4-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a77c6fb8d76e9c9f99f2f3437c1a4ac287b34eaf40997cfab1e9bd2be175ac39"}, - {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7aad98a25ed8ac917fdd8a9c1e706e5a0956e06c498be1f713b61734333a4507"}, - {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3530382a43a0e524bc931f187fc69ef4c42828cf7d7f592f7f249f602b5a4ab0"}, - {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:6b613164cb8425e2f8db5849ffb84892e523bf6d26deb8f9bb76ae86181fa12b"}, - {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:96d4819e25bf3b685199b304a0029ce4a3caf98947ce8a066c9137cc78ad2c58"}, - {file = "cryptography-45.0.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b97737a3ffbea79eebb062eb0d67d72307195035332501722a9ca86bab9e3ab2"}, - {file = "cryptography-45.0.4-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4828190fb6c4bcb6ebc6331f01fe66ae838bb3bd58e753b59d4b22eb444b996c"}, - {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:03dbff8411206713185b8cebe31bc5c0eb544799a50c09035733716b386e61a4"}, - {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:51dfbd4d26172d31150d84c19bbe06c68ea4b7f11bbc7b3a5e146b367c311349"}, - {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:0339a692de47084969500ee455e42c58e449461e0ec845a34a6a9b9bf7df7fb8"}, - {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:0cf13c77d710131d33e63626bd55ae7c0efb701ebdc2b3a7952b9b23a0412862"}, - {file = "cryptography-45.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:bbc505d1dc469ac12a0a064214879eac6294038d6b24ae9f71faae1448a9608d"}, - {file = "cryptography-45.0.4.tar.gz", hash = "sha256:7405ade85c83c37682c8fe65554759800a4a8c54b2d96e0f8ad114d31b808d57"}, + {file = "cryptography-45.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:101ee65078f6dd3e5a028d4f19c07ffa4dd22cce6a20eaa160f8b5219911e7d8"}, + {file = "cryptography-45.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3a264aae5f7fbb089dbc01e0242d3b67dffe3e6292e1f5182122bdf58e65215d"}, + {file = 
"cryptography-45.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e74d30ec9c7cb2f404af331d5b4099a9b322a8a6b25c4632755c8757345baac5"}, + {file = "cryptography-45.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3af26738f2db354aafe492fb3869e955b12b2ef2e16908c8b9cb928128d42c57"}, + {file = "cryptography-45.0.5-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e6c00130ed423201c5bc5544c23359141660b07999ad82e34e7bb8f882bb78e0"}, + {file = "cryptography-45.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:dd420e577921c8c2d31289536c386aaa30140b473835e97f83bc71ea9d2baf2d"}, + {file = "cryptography-45.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:d05a38884db2ba215218745f0781775806bde4f32e07b135348355fe8e4991d9"}, + {file = "cryptography-45.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:ad0caded895a00261a5b4aa9af828baede54638754b51955a0ac75576b831b27"}, + {file = "cryptography-45.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9024beb59aca9d31d36fcdc1604dd9bbeed0a55bface9f1908df19178e2f116e"}, + {file = "cryptography-45.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:91098f02ca81579c85f66df8a588c78f331ca19089763d733e34ad359f474174"}, + {file = "cryptography-45.0.5-cp311-abi3-win32.whl", hash = "sha256:926c3ea71a6043921050eaa639137e13dbe7b4ab25800932a8498364fc1abec9"}, + {file = "cryptography-45.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:b85980d1e345fe769cfc57c57db2b59cff5464ee0c045d52c0df087e926fbe63"}, + {file = "cryptography-45.0.5-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f3562c2f23c612f2e4a6964a61d942f891d29ee320edb62ff48ffb99f3de9ae8"}, + {file = "cryptography-45.0.5-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3fcfbefc4a7f332dece7272a88e410f611e79458fab97b5efe14e54fe476f4fd"}, + {file = "cryptography-45.0.5-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:460f8c39ba66af7db0545a8c6f2eabcbc5a5528fc1cf6c3fa9a1e44cec33385e"}, + {file = "cryptography-45.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:9b4cf6318915dccfe218e69bbec417fdd7c7185aa7aab139a2c0beb7468c89f0"}, + {file = "cryptography-45.0.5-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2089cc8f70a6e454601525e5bf2779e665d7865af002a5dec8d14e561002e135"}, + {file = "cryptography-45.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0027d566d65a38497bc37e0dd7c2f8ceda73597d2ac9ba93810204f56f52ebc7"}, + {file = "cryptography-45.0.5-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:be97d3a19c16a9be00edf79dca949c8fa7eff621763666a145f9f9535a5d7f42"}, + {file = "cryptography-45.0.5-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:7760c1c2e1a7084153a0f68fab76e754083b126a47d0117c9ed15e69e2103492"}, + {file = "cryptography-45.0.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6ff8728d8d890b3dda5765276d1bc6fb099252915a2cd3aff960c4c195745dd0"}, + {file = "cryptography-45.0.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7259038202a47fdecee7e62e0fd0b0738b6daa335354396c6ddebdbe1206af2a"}, + {file = "cryptography-45.0.5-cp37-abi3-win32.whl", hash = "sha256:1e1da5accc0c750056c556a93c3e9cb828970206c68867712ca5805e46dc806f"}, + {file = "cryptography-45.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:90cb0a7bb35959f37e23303b7eed0a32280510030daba3f7fdfbb65defde6a97"}, + {file = "cryptography-45.0.5-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:206210d03c1193f4e1ff681d22885181d47efa1ab3018766a7b32a7b3d6e6afd"}, + {file = "cryptography-45.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c648025b6840fe62e57107e0a25f604db740e728bd67da4f6f060f03017d5097"}, + {file = "cryptography-45.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b8fa8b0a35a9982a3c60ec79905ba5bb090fc0b9addcfd3dc2dd04267e45f25e"}, + {file = "cryptography-45.0.5-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:14d96584701a887763384f3c47f0ca7c1cce322aa1c31172680eb596b890ec30"}, + {file = "cryptography-45.0.5-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:57c816dfbd1659a367831baca4b775b2a5b43c003daf52e9d57e1d30bc2e1b0e"}, + {file = "cryptography-45.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b9e38e0a83cd51e07f5a48ff9691cae95a79bea28fe4ded168a8e5c6c77e819d"}, + {file = "cryptography-45.0.5-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8c4a6ff8a30e9e3d38ac0539e9a9e02540ab3f827a3394f8852432f6b0ea152e"}, + {file = "cryptography-45.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bd4c45986472694e5121084c6ebbd112aa919a25e783b87eb95953c9573906d6"}, + {file = "cryptography-45.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:982518cd64c54fcada9d7e5cf28eabd3ee76bd03ab18e08a48cad7e8b6f31b18"}, + {file = "cryptography-45.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:12e55281d993a793b0e883066f590c1ae1e802e3acb67f8b442e721e475e6463"}, + {file = "cryptography-45.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:5aa1e32983d4443e310f726ee4b071ab7569f58eedfdd65e9675484a4eb67bd1"}, + {file = "cryptography-45.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:e357286c1b76403dd384d938f93c46b2b058ed4dfcdce64a770f0537ed3feb6f"}, + {file = "cryptography-45.0.5.tar.gz", hash = "sha256:72e76caa004ab63accdf26023fccd1d087f6d90ec6048ff33ad0445abf7f605a"}, ] markers = {dev = "sys_platform == \"linux\""} @@ -845,7 +846,7 @@ nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8 pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] sdist = ["build (>=1.0.0)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi (>=2024)", "cryptography-vectors (==45.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] +test = ["certifi (>=2024)", "cryptography-vectors (==45.0.5)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] test-randomorder = ["pytest-randomly"] [[package]] @@ -939,14 +940,14 @@ voice = ["PyNaCl (>=1.3.0,<1.6)"] [[package]] name = "distlib" -version = "0.3.9" +version = "0.4.0" description = "Distribution utilities" optional = false python-versions = "*" groups = ["dev"] files = [ - {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, - {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, + {file = "distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16"}, + {file = "distlib-0.4.0.tar.gz", hash = "sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d"}, ] [[package]] @@ -1389,24 +1390,24 @@ files = [ [[package]] name = "hishel" -version = "0.1.2" +version = "0.1.3" description = "Persistent cache 
implementation for httpx and httpcore" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "hishel-0.1.2-py3-none-any.whl", hash = "sha256:802b4e446017f4867efdb26d3417670991ad1b4826d24331110871fe8957b5d0"}, - {file = "hishel-0.1.2.tar.gz", hash = "sha256:6643450bfb1cfa2ecd6002769f6f5069d0d048c9c1f1e29a98a48302d5875092"}, + {file = "hishel-0.1.3-py3-none-any.whl", hash = "sha256:bae3ba9970ffc56f90014aea2b3019158fb0a5b0b635a56f414ba6b96651966e"}, + {file = "hishel-0.1.3.tar.gz", hash = "sha256:db3e07429cb739dcda851ff9b35b0f3e7589e21b90ee167df54336ac608b6ec3"}, ] [package.dependencies] httpx = ">=0.28.0" [package.extras] -redis = ["redis (==5.0.4)"] +redis = ["redis (==6.2.0)"] s3 = ["boto3 (>=1.15.0,<=1.15.3) ; python_version < \"3.12\"", "boto3 (>=1.15.3) ; python_version >= \"3.12\""] sqlite = ["anysqlite (>=0.0.5)"] -yaml = ["pyyaml (==6.0.1)"] +yaml = ["pyyaml (==6.0.2)"] [[package]] name = "htmlmin2" @@ -1597,18 +1598,18 @@ test = ["portend", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-c [[package]] name = "jaraco-functools" -version = "4.1.0" +version = "4.2.1" description = "Functools like those found in stdlib" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "jaraco.functools-4.1.0-py3-none-any.whl", hash = "sha256:ad159f13428bc4acbf5541ad6dec511f91573b90fba04df61dafa2a1231cf649"}, - {file = "jaraco_functools-4.1.0.tar.gz", hash = "sha256:70f7e0e2ae076498e212562325e805204fc092d7b4c17e0e86c959e249701a9d"}, + {file = "jaraco_functools-4.2.1-py3-none-any.whl", hash = "sha256:590486285803805f4b1f99c60ca9e94ed348d4added84b74c7a12885561e524e"}, + {file = "jaraco_functools-4.2.1.tar.gz", hash = "sha256:be634abfccabce56fa3053f8c7ebe37b682683a4ee7793670ced17bab0087353"}, ] [package.dependencies] -more-itertools = "*" +more_itertools = "*" [package.extras] check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] @@ -2317,122 +2318,122 @@ files = [ [[package]] name = "multidict" -version = "6.5.0" +version = "6.6.3" description = "multidict implementation" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "multidict-6.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2e118a202904623b1d2606d1c8614e14c9444b59d64454b0c355044058066469"}, - {file = "multidict-6.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a42995bdcaff4e22cb1280ae7752c3ed3fbb398090c6991a2797a4a0e5ed16a9"}, - {file = "multidict-6.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2261b538145723ca776e55208640fffd7ee78184d223f37c2b40b9edfe0e818a"}, - {file = "multidict-6.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e5b19f8cd67235fab3e195ca389490415d9fef5a315b1fa6f332925dc924262"}, - {file = "multidict-6.5.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:177b081e4dec67c3320b16b3aa0babc178bbf758553085669382c7ec711e1ec8"}, - {file = "multidict-6.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d30a2cc106a7d116b52ee046207614db42380b62e6b1dd2a50eba47c5ca5eb1"}, - {file = "multidict-6.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a72933bc308d7a64de37f0d51795dbeaceebdfb75454f89035cdfc6a74cfd129"}, - {file = "multidict-6.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96d109e663d032280ef8ef62b50924b2e887d5ddf19e301844a6cb7e91a172a6"}, - {file = 
"multidict-6.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b555329c9894332401f03b9a87016f0b707b6fccd4706793ec43b4a639e75869"}, - {file = "multidict-6.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6994bad9d471ef2156f2b6850b51e20ee409c6b9deebc0e57be096be9faffdce"}, - {file = "multidict-6.5.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:b15f817276c96cde9060569023808eec966bd8da56a97e6aa8116f34ddab6534"}, - {file = "multidict-6.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b4bf507c991db535a935b2127cf057a58dbc688c9f309c72080795c63e796f58"}, - {file = "multidict-6.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:60c3f8f13d443426c55f88cf3172547bbc600a86d57fd565458b9259239a6737"}, - {file = "multidict-6.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:a10227168a24420c158747fc201d4279aa9af1671f287371597e2b4f2ff21879"}, - {file = "multidict-6.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e3b1425fe54ccfde66b8cfb25d02be34d5dfd2261a71561ffd887ef4088b4b69"}, - {file = "multidict-6.5.0-cp310-cp310-win32.whl", hash = "sha256:b4e47ef51237841d1087e1e1548071a6ef22e27ed0400c272174fa585277c4b4"}, - {file = "multidict-6.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:63b3b24fadc7067282c88fae5b2f366d5b3a7c15c021c2838de8c65a50eeefb4"}, - {file = "multidict-6.5.0-cp310-cp310-win_arm64.whl", hash = "sha256:8b2d61afbafc679b7eaf08e9de4fa5d38bd5dc7a9c0a577c9f9588fb49f02dbb"}, - {file = "multidict-6.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8b4bf6bb15a05796a07a248084e3e46e032860c899c7a9b981030e61368dba95"}, - {file = "multidict-6.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46bb05d50219655c42a4b8fcda9c7ee658a09adbb719c48e65a20284e36328ea"}, - {file = "multidict-6.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:54f524d73f4d54e87e03c98f6af601af4777e4668a52b1bd2ae0a4d6fc7b392b"}, - {file = "multidict-6.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:529b03600466480ecc502000d62e54f185a884ed4570dee90d9a273ee80e37b5"}, - {file = "multidict-6.5.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:69ad681ad7c93a41ee7005cc83a144b5b34a3838bcf7261e2b5356057b0f78de"}, - {file = "multidict-6.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fe9fada8bc0839466b09fa3f6894f003137942984843ec0c3848846329a36ae"}, - {file = "multidict-6.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f94c6ea6405fcf81baef1e459b209a78cda5442e61b5b7a57ede39d99b5204a0"}, - {file = "multidict-6.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84ca75ad8a39ed75f079a8931435a5b51ee4c45d9b32e1740f99969a5d1cc2ee"}, - {file = "multidict-6.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be4c08f3a2a6cc42b414496017928d95898964fed84b1b2dace0c9ee763061f9"}, - {file = "multidict-6.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:046a7540cfbb4d5dc846a1fd9843f3ba980c6523f2e0c5b8622b4a5c94138ae6"}, - {file = "multidict-6.5.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:64306121171d988af77d74be0d8c73ee1a69cf6f96aea7fa6030c88f32a152dd"}, - {file = "multidict-6.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b4ac1dd5eb0ecf6f7351d5a9137f30a83f7182209c5d37f61614dfdce5714853"}, - {file = "multidict-6.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = 
"sha256:bab4a8337235365f4111a7011a1f028826ca683834ebd12de4b85e2844359c36"}, - {file = "multidict-6.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a05b5604c5a75df14a63eeeca598d11b2c3745b9008539b70826ea044063a572"}, - {file = "multidict-6.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:67c4a640952371c9ca65b6a710598be246ef3be5ca83ed38c16a7660d3980877"}, - {file = "multidict-6.5.0-cp311-cp311-win32.whl", hash = "sha256:fdeae096ca36c12d8aca2640b8407a9d94e961372c68435bef14e31cce726138"}, - {file = "multidict-6.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:e2977ef8b7ce27723ee8c610d1bd1765da4f3fbe5a64f9bf1fd3b4770e31fbc0"}, - {file = "multidict-6.5.0-cp311-cp311-win_arm64.whl", hash = "sha256:82d0cf0ea49bae43d9e8c3851e21954eff716259ff42da401b668744d1760bcb"}, - {file = "multidict-6.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1bb986c8ea9d49947bc325c51eced1ada6d8d9b4c5b15fd3fcdc3c93edef5a74"}, - {file = "multidict-6.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:03c0923da300120830fc467e23805d63bbb4e98b94032bd863bc7797ea5fa653"}, - {file = "multidict-6.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4c78d5ec00fdd35c91680ab5cf58368faad4bd1a8721f87127326270248de9bc"}, - {file = "multidict-6.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aadc3cb78be90a887f8f6b73945b840da44b4a483d1c9750459ae69687940c97"}, - {file = "multidict-6.5.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:5b02e1ca495d71e07e652e4cef91adae3bf7ae4493507a263f56e617de65dafc"}, - {file = "multidict-6.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7fe92a62326eef351668eec4e2dfc494927764a0840a1895cff16707fceffcd3"}, - {file = "multidict-6.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7673ee4f63879ecd526488deb1989041abcb101b2d30a9165e1e90c489f3f7fb"}, - {file = "multidict-6.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa097ae2a29f573de7e2d86620cbdda5676d27772d4ed2669cfa9961a0d73955"}, - {file = "multidict-6.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:300da0fa4f8457d9c4bd579695496116563409e676ac79b5e4dca18e49d1c308"}, - {file = "multidict-6.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9a19bd108c35877b57393243d392d024cfbfdefe759fd137abb98f6fc910b64c"}, - {file = "multidict-6.5.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:0f32a1777465a35c35ddbbd7fc1293077938a69402fcc59e40b2846d04a120dd"}, - {file = "multidict-6.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9cc1e10c14ce8112d1e6d8971fe3cdbe13e314f68bea0e727429249d4a6ce164"}, - {file = "multidict-6.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:e95c5e07a06594bdc288117ca90e89156aee8cb2d7c330b920d9c3dd19c05414"}, - {file = "multidict-6.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:40ff26f58323795f5cd2855e2718a1720a1123fb90df4553426f0efd76135462"}, - {file = "multidict-6.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:76803a29fd71869a8b59c2118c9dcfb3b8f9c8723e2cce6baeb20705459505cf"}, - {file = "multidict-6.5.0-cp312-cp312-win32.whl", hash = "sha256:df7ecbc65a53a2ce1b3a0c82e6ad1a43dcfe7c6137733f9176a92516b9f5b851"}, - {file = "multidict-6.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:0ec1c3fbbb0b655a6540bce408f48b9a7474fd94ed657dcd2e890671fefa7743"}, - {file = "multidict-6.5.0-cp312-cp312-win_arm64.whl", hash = 
"sha256:2d24a00d34808b22c1f15902899b9d82d0faeca9f56281641c791d8605eacd35"}, - {file = "multidict-6.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:53d92df1752df67a928fa7f884aa51edae6f1cf00eeb38cbcf318cf841c17456"}, - {file = "multidict-6.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:680210de2c38eef17ce46b8df8bf2c1ece489261a14a6e43c997d49843a27c99"}, - {file = "multidict-6.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e279259bcb936732bfa1a8eec82b5d2352b3df69d2fa90d25808cfc403cee90a"}, - {file = "multidict-6.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1c185fc1069781e3fc8b622c4331fb3b433979850392daa5efbb97f7f9959bb"}, - {file = "multidict-6.5.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6bb5f65ff91daf19ce97f48f63585e51595539a8a523258b34f7cef2ec7e0617"}, - {file = "multidict-6.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8646b4259450c59b9286db280dd57745897897284f6308edbdf437166d93855"}, - {file = "multidict-6.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d245973d4ecc04eea0a8e5ebec7882cf515480036e1b48e65dffcfbdf86d00be"}, - {file = "multidict-6.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a133e7ddc9bc7fb053733d0ff697ce78c7bf39b5aec4ac12857b6116324c8d75"}, - {file = "multidict-6.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80d696fa38d738fcebfd53eec4d2e3aeb86a67679fd5e53c325756682f152826"}, - {file = "multidict-6.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:20d30c9410ac3908abbaa52ee5967a754c62142043cf2ba091e39681bd51d21a"}, - {file = "multidict-6.5.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:6c65068cc026f217e815fa519d8e959a7188e94ec163ffa029c94ca3ef9d4a73"}, - {file = "multidict-6.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e355ac668a8c3e49c2ca8daa4c92f0ad5b705d26da3d5af6f7d971e46c096da7"}, - {file = "multidict-6.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:08db204213d0375a91a381cae0677ab95dd8c67a465eb370549daf6dbbf8ba10"}, - {file = "multidict-6.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:ffa58e3e215af8f6536dc837a990e456129857bb6fd546b3991be470abd9597a"}, - {file = "multidict-6.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3e86eb90015c6f21658dbd257bb8e6aa18bdb365b92dd1fba27ec04e58cdc31b"}, - {file = "multidict-6.5.0-cp313-cp313-win32.whl", hash = "sha256:f34a90fbd9959d0f857323bd3c52b3e6011ed48f78d7d7b9e04980b8a41da3af"}, - {file = "multidict-6.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:fcb2aa79ac6aef8d5b709bbfc2fdb1d75210ba43038d70fbb595b35af470ce06"}, - {file = "multidict-6.5.0-cp313-cp313-win_arm64.whl", hash = "sha256:6dcee5e7e92060b4bb9bb6f01efcbb78c13d0e17d9bc6eec71660dd71dc7b0c2"}, - {file = "multidict-6.5.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:cbbc88abea2388fde41dd574159dec2cda005cb61aa84950828610cb5010f21a"}, - {file = "multidict-6.5.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70b599f70ae6536e5976364d3c3cf36f40334708bd6cebdd1e2438395d5e7676"}, - {file = "multidict-6.5.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:828bab777aa8d29d59700018178061854e3a47727e0611cb9bec579d3882de3b"}, - {file = "multidict-6.5.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9695fc1462f17b131c111cf0856a22ff154b0480f86f539d24b2778571ff94d"}, - {file = 
"multidict-6.5.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0b5ac6ebaf5d9814b15f399337ebc6d3a7f4ce9331edd404e76c49a01620b68d"}, - {file = "multidict-6.5.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84a51e3baa77ded07be4766a9e41d977987b97e49884d4c94f6d30ab6acaee14"}, - {file = "multidict-6.5.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8de67f79314d24179e9b1869ed15e88d6ba5452a73fc9891ac142e0ee018b5d6"}, - {file = "multidict-6.5.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17f78a52c214481d30550ec18208e287dfc4736f0c0148208334b105fd9e0887"}, - {file = "multidict-6.5.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2966d0099cb2e2039f9b0e73e7fd5eb9c85805681aa2a7f867f9d95b35356921"}, - {file = "multidict-6.5.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:86fb42ed5ed1971c642cc52acc82491af97567534a8e381a8d50c02169c4e684"}, - {file = "multidict-6.5.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:4e990cbcb6382f9eae4ec720bcac6a1351509e6fc4a5bb70e4984b27973934e6"}, - {file = "multidict-6.5.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:d99a59d64bb1f7f2117bec837d9e534c5aeb5dcedf4c2b16b9753ed28fdc20a3"}, - {file = "multidict-6.5.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:e8ef15cc97c9890212e1caf90f0d63f6560e1e101cf83aeaf63a57556689fb34"}, - {file = "multidict-6.5.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:b8a09aec921b34bd8b9f842f0bcfd76c6a8c033dc5773511e15f2d517e7e1068"}, - {file = "multidict-6.5.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ff07b504c23b67f2044533244c230808a1258b3493aaf3ea2a0785f70b7be461"}, - {file = "multidict-6.5.0-cp313-cp313t-win32.whl", hash = "sha256:9232a117341e7e979d210e41c04e18f1dc3a1d251268df6c818f5334301274e1"}, - {file = "multidict-6.5.0-cp313-cp313t-win_amd64.whl", hash = "sha256:44cb5c53fb2d4cbcee70a768d796052b75d89b827643788a75ea68189f0980a1"}, - {file = "multidict-6.5.0-cp313-cp313t-win_arm64.whl", hash = "sha256:51d33fafa82640c0217391d4ce895d32b7e84a832b8aee0dcc1b04d8981ec7f4"}, - {file = "multidict-6.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c0078358470da8dc90c37456f4a9cde9f86200949a048d53682b9cd21e5bbf2b"}, - {file = "multidict-6.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5cc7968b7d1bf8b973c307d38aa3a2f2c783f149bcac855944804252f1df5105"}, - {file = "multidict-6.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0ad73a60e11aa92f1f2c9330efdeaac4531b719fc568eb8d312fd4112f34cc18"}, - {file = "multidict-6.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3233f21abdcd180b2624eb6988a1e1287210e99bca986d8320afca5005d85844"}, - {file = "multidict-6.5.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:bee5c0b79fca78fd2ab644ca4dc831ecf793eb6830b9f542ee5ed2c91bc35a0e"}, - {file = "multidict-6.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e053a4d690f4352ce46583080fefade9a903ce0fa9d820db1be80bdb9304fa2f"}, - {file = "multidict-6.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:42bdee30424c1f4dcda96e07ac60e2a4ede8a89f8ae2f48b5e4ccc060f294c52"}, - {file = "multidict-6.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58b2ded1a7982cf7b8322b0645713a0086b2b3cf5bb9f7c01edfc1a9f98d20dc"}, - {file = 
"multidict-6.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f805b8b951d1fadc5bc18c3c93e509608ac5a883045ee33bc22e28806847c20"}, - {file = "multidict-6.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2540395b63723da748f850568357a39cd8d8d4403ca9439f9fcdad6dd423c780"}, - {file = "multidict-6.5.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:c96aedff25f4e47b6697ba048b2c278f7caa6df82c7c3f02e077bcc8d47b4b76"}, - {file = "multidict-6.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e80de5ad995de210fd02a65c2350649b8321d09bd2e44717eaefb0f5814503e8"}, - {file = "multidict-6.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:6cb9bcedd9391b313e5ec2fb3aa07c03e050550e7b9e4646c076d5c24ba01532"}, - {file = "multidict-6.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:a7d130ed7a112e25ab47309962ecafae07d073316f9d158bc7b3936b52b80121"}, - {file = "multidict-6.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:95750a9a9741cd1855d1b6cb4c6031ae01c01ad38d280217b64bfae986d39d56"}, - {file = "multidict-6.5.0-cp39-cp39-win32.whl", hash = "sha256:7f78caf409914f108f4212b53a9033abfdc2cbab0647e9ac3a25bb0f21ab43d2"}, - {file = "multidict-6.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:220c74009507e847a3a6fc5375875f2a2e05bd9ce28cf607be0e8c94600f4472"}, - {file = "multidict-6.5.0-cp39-cp39-win_arm64.whl", hash = "sha256:d98f4ac9c1ede7e9d04076e2e6d967e15df0079a6381b297270f6bcab661195e"}, - {file = "multidict-6.5.0-py3-none-any.whl", hash = "sha256:5634b35f225977605385f56153bd95a7133faffc0ffe12ad26e10517537e8dfc"}, - {file = "multidict-6.5.0.tar.gz", hash = "sha256:942bd8002492ba819426a8d7aefde3189c1b87099cdf18aaaefefcf7f3f7b6d2"}, + {file = "multidict-6.6.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a2be5b7b35271f7fff1397204ba6708365e3d773579fe2a30625e16c4b4ce817"}, + {file = "multidict-6.6.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:12f4581d2930840295c461764b9a65732ec01250b46c6b2c510d7ee68872b140"}, + {file = "multidict-6.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dd7793bab517e706c9ed9d7310b06c8672fd0aeee5781bfad612f56b8e0f7d14"}, + {file = "multidict-6.6.3-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:72d8815f2cd3cf3df0f83cac3f3ef801d908b2d90409ae28102e0553af85545a"}, + {file = "multidict-6.6.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:531e331a2ee53543ab32b16334e2deb26f4e6b9b28e41f8e0c87e99a6c8e2d69"}, + {file = "multidict-6.6.3-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:42ca5aa9329a63be8dc49040f63817d1ac980e02eeddba763a9ae5b4027b9c9c"}, + {file = "multidict-6.6.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:208b9b9757060b9faa6f11ab4bc52846e4f3c2fb8b14d5680c8aac80af3dc751"}, + {file = "multidict-6.6.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:acf6b97bd0884891af6a8b43d0f586ab2fcf8e717cbd47ab4bdddc09e20652d8"}, + {file = "multidict-6.6.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:68e9e12ed00e2089725669bdc88602b0b6f8d23c0c95e52b95f0bc69f7fe9b55"}, + {file = "multidict-6.6.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:05db2f66c9addb10cfa226e1acb363450fab2ff8a6df73c622fefe2f5af6d4e7"}, + {file = "multidict-6.6.3-cp310-cp310-musllinux_1_2_armv7l.whl", 
hash = "sha256:0db58da8eafb514db832a1b44f8fa7906fdd102f7d982025f816a93ba45e3dcb"}, + {file = "multidict-6.6.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:14117a41c8fdb3ee19c743b1c027da0736fdb79584d61a766da53d399b71176c"}, + {file = "multidict-6.6.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:877443eaaabcd0b74ff32ebeed6f6176c71850feb7d6a1d2db65945256ea535c"}, + {file = "multidict-6.6.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:70b72e749a4f6e7ed8fb334fa8d8496384840319512746a5f42fa0aec79f4d61"}, + {file = "multidict-6.6.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:43571f785b86afd02b3855c5ac8e86ec921b760298d6f82ff2a61daf5a35330b"}, + {file = "multidict-6.6.3-cp310-cp310-win32.whl", hash = "sha256:20c5a0c3c13a15fd5ea86c42311859f970070e4e24de5a550e99d7c271d76318"}, + {file = "multidict-6.6.3-cp310-cp310-win_amd64.whl", hash = "sha256:ab0a34a007704c625e25a9116c6770b4d3617a071c8a7c30cd338dfbadfe6485"}, + {file = "multidict-6.6.3-cp310-cp310-win_arm64.whl", hash = "sha256:769841d70ca8bdd140a715746199fc6473414bd02efd678d75681d2d6a8986c5"}, + {file = "multidict-6.6.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:18f4eba0cbac3546b8ae31e0bbc55b02c801ae3cbaf80c247fcdd89b456ff58c"}, + {file = "multidict-6.6.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef43b5dd842382329e4797c46f10748d8c2b6e0614f46b4afe4aee9ac33159df"}, + {file = "multidict-6.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bf9bd1fd5eec01494e0f2e8e446a74a85d5e49afb63d75a9934e4a5423dba21d"}, + {file = "multidict-6.6.3-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:5bd8d6f793a787153956cd35e24f60485bf0651c238e207b9a54f7458b16d539"}, + {file = "multidict-6.6.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bf99b4daf908c73856bd87ee0a2499c3c9a3d19bb04b9c6025e66af3fd07462"}, + {file = "multidict-6.6.3-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0b9e59946b49dafaf990fd9c17ceafa62976e8471a14952163d10a7a630413a9"}, + {file = "multidict-6.6.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e2db616467070d0533832d204c54eea6836a5e628f2cb1e6dfd8cd6ba7277cb7"}, + {file = "multidict-6.6.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7394888236621f61dcdd25189b2768ae5cc280f041029a5bcf1122ac63df79f9"}, + {file = "multidict-6.6.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f114d8478733ca7388e7c7e0ab34b72547476b97009d643644ac33d4d3fe1821"}, + {file = "multidict-6.6.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cdf22e4db76d323bcdc733514bf732e9fb349707c98d341d40ebcc6e9318ef3d"}, + {file = "multidict-6.6.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:e995a34c3d44ab511bfc11aa26869b9d66c2d8c799fa0e74b28a473a692532d6"}, + {file = "multidict-6.6.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:766a4a5996f54361d8d5a9050140aa5362fe48ce51c755a50c0bc3706460c430"}, + {file = "multidict-6.6.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:3893a0d7d28a7fe6ca7a1f760593bc13038d1d35daf52199d431b61d2660602b"}, + {file = "multidict-6.6.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:934796c81ea996e61914ba58064920d6cad5d99140ac3167901eb932150e2e56"}, + {file = "multidict-6.6.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:9ed948328aec2072bc00f05d961ceadfd3e9bfc2966c1319aeaf7b7c21219183"}, + {file = "multidict-6.6.3-cp311-cp311-win32.whl", hash = "sha256:9f5b28c074c76afc3e4c610c488e3493976fe0e596dd3db6c8ddfbb0134dcac5"}, + {file = "multidict-6.6.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc7f6fbc61b1c16050a389c630da0b32fc6d4a3d191394ab78972bf5edc568c2"}, + {file = "multidict-6.6.3-cp311-cp311-win_arm64.whl", hash = "sha256:d4e47d8faffaae822fb5cba20937c048d4f734f43572e7079298a6c39fb172cb"}, + {file = "multidict-6.6.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:056bebbeda16b2e38642d75e9e5310c484b7c24e3841dc0fb943206a72ec89d6"}, + {file = "multidict-6.6.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e5f481cccb3c5c5e5de5d00b5141dc589c1047e60d07e85bbd7dea3d4580d63f"}, + {file = "multidict-6.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:10bea2ee839a759ee368b5a6e47787f399b41e70cf0c20d90dfaf4158dfb4e55"}, + {file = "multidict-6.6.3-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:2334cfb0fa9549d6ce2c21af2bfbcd3ac4ec3646b1b1581c88e3e2b1779ec92b"}, + {file = "multidict-6.6.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8fee016722550a2276ca2cb5bb624480e0ed2bd49125b2b73b7010b9090e888"}, + {file = "multidict-6.6.3-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5511cb35f5c50a2db21047c875eb42f308c5583edf96bd8ebf7d770a9d68f6d"}, + {file = "multidict-6.6.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:712b348f7f449948e0a6c4564a21c7db965af900973a67db432d724619b3c680"}, + {file = "multidict-6.6.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e4e15d2138ee2694e038e33b7c3da70e6b0ad8868b9f8094a72e1414aeda9c1a"}, + {file = "multidict-6.6.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8df25594989aebff8a130f7899fa03cbfcc5d2b5f4a461cf2518236fe6f15961"}, + {file = "multidict-6.6.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:159ca68bfd284a8860f8d8112cf0521113bffd9c17568579e4d13d1f1dc76b65"}, + {file = "multidict-6.6.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e098c17856a8c9ade81b4810888c5ad1914099657226283cab3062c0540b0643"}, + {file = "multidict-6.6.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:67c92ed673049dec52d7ed39f8cf9ebbadf5032c774058b4406d18c8f8fe7063"}, + {file = "multidict-6.6.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:bd0578596e3a835ef451784053cfd327d607fc39ea1a14812139339a18a0dbc3"}, + {file = "multidict-6.6.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:346055630a2df2115cd23ae271910b4cae40f4e336773550dca4889b12916e75"}, + {file = "multidict-6.6.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:555ff55a359302b79de97e0468e9ee80637b0de1fce77721639f7cd9440b3a10"}, + {file = "multidict-6.6.3-cp312-cp312-win32.whl", hash = "sha256:73ab034fb8d58ff85c2bcbadc470efc3fafeea8affcf8722855fb94557f14cc5"}, + {file = "multidict-6.6.3-cp312-cp312-win_amd64.whl", hash = "sha256:04cbcce84f63b9af41bad04a54d4cc4e60e90c35b9e6ccb130be2d75b71f8c17"}, + {file = "multidict-6.6.3-cp312-cp312-win_arm64.whl", hash = "sha256:0f1130b896ecb52d2a1e615260f3ea2af55fa7dc3d7c3003ba0c3121a759b18b"}, + {file = "multidict-6.6.3-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:540d3c06d48507357a7d57721e5094b4f7093399a0106c211f33540fdc374d55"}, + {file = "multidict-6.6.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9c19cea2a690f04247d43f366d03e4eb110a0dc4cd1bbeee4d445435428ed35b"}, + {file = "multidict-6.6.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7af039820cfd00effec86bda5d8debef711a3e86a1d3772e85bea0f243a4bd65"}, + {file = "multidict-6.6.3-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:500b84f51654fdc3944e936f2922114349bf8fdcac77c3092b03449f0e5bc2b3"}, + {file = "multidict-6.6.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f3fc723ab8a5c5ed6c50418e9bfcd8e6dceba6c271cee6728a10a4ed8561520c"}, + {file = "multidict-6.6.3-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:94c47ea3ade005b5976789baaed66d4de4480d0a0bf31cef6edaa41c1e7b56a6"}, + {file = "multidict-6.6.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:dbc7cf464cc6d67e83e136c9f55726da3a30176f020a36ead246eceed87f1cd8"}, + {file = "multidict-6.6.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:900eb9f9da25ada070f8ee4a23f884e0ee66fe4e1a38c3af644256a508ad81ca"}, + {file = "multidict-6.6.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7c6df517cf177da5d47ab15407143a89cd1a23f8b335f3a28d57e8b0a3dbb884"}, + {file = "multidict-6.6.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4ef421045f13879e21c994b36e728d8e7d126c91a64b9185810ab51d474f27e7"}, + {file = "multidict-6.6.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:6c1e61bb4f80895c081790b6b09fa49e13566df8fbff817da3f85b3a8192e36b"}, + {file = "multidict-6.6.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e5e8523bb12d7623cd8300dbd91b9e439a46a028cd078ca695eb66ba31adee3c"}, + {file = "multidict-6.6.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ef58340cc896219e4e653dade08fea5c55c6df41bcc68122e3be3e9d873d9a7b"}, + {file = "multidict-6.6.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fc9dc435ec8699e7b602b94fe0cd4703e69273a01cbc34409af29e7820f777f1"}, + {file = "multidict-6.6.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9e864486ef4ab07db5e9cb997bad2b681514158d6954dd1958dfb163b83d53e6"}, + {file = "multidict-6.6.3-cp313-cp313-win32.whl", hash = "sha256:5633a82fba8e841bc5c5c06b16e21529573cd654f67fd833650a215520a6210e"}, + {file = "multidict-6.6.3-cp313-cp313-win_amd64.whl", hash = "sha256:e93089c1570a4ad54c3714a12c2cef549dc9d58e97bcded193d928649cab78e9"}, + {file = "multidict-6.6.3-cp313-cp313-win_arm64.whl", hash = "sha256:c60b401f192e79caec61f166da9c924e9f8bc65548d4246842df91651e83d600"}, + {file = "multidict-6.6.3-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:02fd8f32d403a6ff13864b0851f1f523d4c988051eea0471d4f1fd8010f11134"}, + {file = "multidict-6.6.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:f3aa090106b1543f3f87b2041eef3c156c8da2aed90c63a2fbed62d875c49c37"}, + {file = "multidict-6.6.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e924fb978615a5e33ff644cc42e6aa241effcf4f3322c09d4f8cebde95aff5f8"}, + {file = "multidict-6.6.3-cp313-cp313t-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:b9fe5a0e57c6dbd0e2ce81ca66272282c32cd11d31658ee9553849d91289e1c1"}, + {file = 
"multidict-6.6.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b24576f208793ebae00280c59927c3b7c2a3b1655e443a25f753c4611bc1c373"}, + {file = "multidict-6.6.3-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:135631cb6c58eac37d7ac0df380294fecdc026b28837fa07c02e459c7fb9c54e"}, + {file = "multidict-6.6.3-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:274d416b0df887aef98f19f21578653982cfb8a05b4e187d4a17103322eeaf8f"}, + {file = "multidict-6.6.3-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e252017a817fad7ce05cafbe5711ed40faeb580e63b16755a3a24e66fa1d87c0"}, + {file = "multidict-6.6.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2e4cc8d848cd4fe1cdee28c13ea79ab0ed37fc2e89dd77bac86a2e7959a8c3bc"}, + {file = "multidict-6.6.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9e236a7094b9c4c1b7585f6b9cca34b9d833cf079f7e4c49e6a4a6ec9bfdc68f"}, + {file = "multidict-6.6.3-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:e0cb0ab69915c55627c933f0b555a943d98ba71b4d1c57bc0d0a66e2567c7471"}, + {file = "multidict-6.6.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:81ef2f64593aba09c5212a3d0f8c906a0d38d710a011f2f42759704d4557d3f2"}, + {file = "multidict-6.6.3-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:b9cbc60010de3562545fa198bfc6d3825df430ea96d2cc509c39bd71e2e7d648"}, + {file = "multidict-6.6.3-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:70d974eaaa37211390cd02ef93b7e938de564bbffa866f0b08d07e5e65da783d"}, + {file = "multidict-6.6.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:3713303e4a6663c6d01d648a68f2848701001f3390a030edaaf3fc949c90bf7c"}, + {file = "multidict-6.6.3-cp313-cp313t-win32.whl", hash = "sha256:639ecc9fe7cd73f2495f62c213e964843826f44505a3e5d82805aa85cac6f89e"}, + {file = "multidict-6.6.3-cp313-cp313t-win_amd64.whl", hash = "sha256:9f97e181f344a0ef3881b573d31de8542cc0dbc559ec68c8f8b5ce2c2e91646d"}, + {file = "multidict-6.6.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ce8b7693da41a3c4fde5871c738a81490cea5496c671d74374c8ab889e1834fb"}, + {file = "multidict-6.6.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c8161b5a7778d3137ea2ee7ae8a08cce0010de3b00ac671c5ebddeaa17cefd22"}, + {file = "multidict-6.6.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1328201ee930f069961ae707d59c6627ac92e351ed5b92397cf534d1336ce557"}, + {file = "multidict-6.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b1db4d2093d6b235de76932febf9d50766cf49a5692277b2c28a501c9637f616"}, + {file = "multidict-6.6.3-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53becb01dd8ebd19d1724bebe369cfa87e4e7f29abbbe5c14c98ce4c383e16cd"}, + {file = "multidict-6.6.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41bb9d1d4c303886e2d85bade86e59885112a7f4277af5ad47ab919a2251f306"}, + {file = "multidict-6.6.3-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:775b464d31dac90f23192af9c291dc9f423101857e33e9ebf0020a10bfcf4144"}, + {file = "multidict-6.6.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d04d01f0a913202205a598246cf77826fe3baa5a63e9f6ccf1ab0601cf56eca0"}, + {file = 
"multidict-6.6.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d25594d3b38a2e6cabfdcafef339f754ca6e81fbbdb6650ad773ea9775af35ab"}, + {file = "multidict-6.6.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:35712f1748d409e0707b165bf49f9f17f9e28ae85470c41615778f8d4f7d9609"}, + {file = "multidict-6.6.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1c8082e5814b662de8589d6a06c17e77940d5539080cbab9fe6794b5241b76d9"}, + {file = "multidict-6.6.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:61af8a4b771f1d4d000b3168c12c3120ccf7284502a94aa58c68a81f5afac090"}, + {file = "multidict-6.6.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:448e4a9afccbf297577f2eaa586f07067441e7b63c8362a3540ba5a38dc0f14a"}, + {file = "multidict-6.6.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:233ad16999afc2bbd3e534ad8dbe685ef8ee49a37dbc2cdc9514e57b6d589ced"}, + {file = "multidict-6.6.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:bb933c891cd4da6bdcc9733d048e994e22e1883287ff7540c2a0f3b117605092"}, + {file = "multidict-6.6.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:37b09ca60998e87734699e88c2363abfd457ed18cfbf88e4009a4e83788e63ed"}, + {file = "multidict-6.6.3-cp39-cp39-win32.whl", hash = "sha256:f54cb79d26d0cd420637d184af38f0668558f3c4bbe22ab7ad830e67249f2e0b"}, + {file = "multidict-6.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:295adc9c0551e5d5214b45cf29ca23dbc28c2d197a9c30d51aed9e037cb7c578"}, + {file = "multidict-6.6.3-cp39-cp39-win_arm64.whl", hash = "sha256:15332783596f227db50fb261c2c251a58ac3873c457f3a550a95d5c0aa3c770d"}, + {file = "multidict-6.6.3-py3-none-any.whl", hash = "sha256:8db10f29c7541fc5da4defd8cd697e1ca429db743fa716325f236079b96f775a"}, + {file = "multidict-6.6.3.tar.gz", hash = "sha256:798a9eb12dab0a6c2e29c1de6f3468af5cb2da6053a20dfa3344907eed0937cc"}, ] [[package]] @@ -2489,14 +2490,14 @@ files = [ [[package]] name = "pbs-installer" -version = "2025.6.12" +version = "2025.7.12" description = "Installer for Python Build Standalone" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "pbs_installer-2025.6.12-py3-none-any.whl", hash = "sha256:438e75de131a2114ac5e86156fc51da7dadd6734844de329ad162cca63709297"}, - {file = "pbs_installer-2025.6.12.tar.gz", hash = "sha256:ae2d3990848652dca699a680b00ea8e19b970cb6172967cb00539bfeed5a7465"}, + {file = "pbs_installer-2025.7.12-py3-none-any.whl", hash = "sha256:d73414224fceb60d4a07bea97facd9acc05de792dd7becc90a7f22383e7c1cab"}, + {file = "pbs_installer-2025.7.12.tar.gz", hash = "sha256:343b8905e1da3cd4b03b68d630086330dde1814294963b77d2664b18b5002ac6"}, ] [package.dependencies] @@ -4142,14 +4143,14 @@ files = [ [[package]] name = "types-aiofiles" -version = "24.1.0.20250606" +version = "24.1.0.20250708" description = "Typing stubs for aiofiles" optional = false python-versions = ">=3.9" groups = ["types"] files = [ - {file = "types_aiofiles-24.1.0.20250606-py3-none-any.whl", hash = "sha256:e568c53fb9017c80897a9aa15c74bf43b7ee90e412286ec1e0912b6e79301aee"}, - {file = "types_aiofiles-24.1.0.20250606.tar.gz", hash = "sha256:48f9e26d2738a21e0b0f19381f713dcdb852a36727da8414b1ada145d40a18fe"}, + {file = "types_aiofiles-24.1.0.20250708-py3-none-any.whl", hash = "sha256:07f8f06465fd415d9293467d1c66cd074b2c3b62b679e26e353e560a8cf63720"}, + {file = "types_aiofiles-24.1.0.20250708.tar.gz", hash = "sha256:c8207ed7385491ce5ba94da02658164ebd66b69a44e892288c9f20cbbf5284ff"}, ] [[package]] @@ -4178,14 +4179,14 @@ files = [ 
[[package]] name = "types-dateparser" -version = "1.2.0.20250601" +version = "1.2.2.20250627" description = "Typing stubs for dateparser" optional = false python-versions = ">=3.9" groups = ["types"] files = [ - {file = "types_dateparser-1.2.0.20250601-py3-none-any.whl", hash = "sha256:114726e7c79f11090618f67cf985dc8262a6d94f16867287db5f94fb4354e179"}, - {file = "types_dateparser-1.2.0.20250601.tar.gz", hash = "sha256:f5a40579b4b0b6737f19d50ea58ca43edcd820577f90d4d5c89a231680bb2834"}, + {file = "types_dateparser-1.2.2.20250627-py3-none-any.whl", hash = "sha256:47fa841640e9e2d96ea69b7debf90423f9506429eb75035d50e3e58b898b71fc"}, + {file = "types_dateparser-1.2.2.20250627.tar.gz", hash = "sha256:4435d920755c00176d60ed18d44aefa3501d0219b6caff3ea4a26c928c7df0e0"}, ] [[package]] @@ -4256,14 +4257,14 @@ files = [ [[package]] name = "types-python-dateutil" -version = "2.9.0.20250516" +version = "2.9.0.20250708" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "types_python_dateutil-2.9.0.20250516-py3-none-any.whl", hash = "sha256:2b2b3f57f9c6a61fba26a9c0ffb9ea5681c9b83e69cd897c6b5f668d9c0cab93"}, - {file = "types_python_dateutil-2.9.0.20250516.tar.gz", hash = "sha256:13e80d6c9c47df23ad773d54b2826bd52dbbb41be87c3f339381c1700ad21ee5"}, + {file = "types_python_dateutil-2.9.0.20250708-py3-none-any.whl", hash = "sha256:4d6d0cc1cc4d24a2dc3816024e502564094497b713f7befda4d5bc7a8e3fd21f"}, + {file = "types_python_dateutil-2.9.0.20250708.tar.gz", hash = "sha256:ccdbd75dab2d6c9696c350579f34cffe2c281e4c5f27a585b2a2438dd1d5c8ab"}, ] [[package]] @@ -4292,14 +4293,14 @@ files = [ [[package]] name = "typing-extensions" -version = "4.14.0" +version = "4.14.1" description = "Backported and Experimental Type Hints for Python 3.9+" optional = false python-versions = ">=3.9" groups = ["main", "dev", "docs"] files = [ - {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"}, - {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"}, + {file = "typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76"}, + {file = "typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36"}, ] [[package]] @@ -4460,83 +4461,83 @@ dev = ["black (>=19.3b0) ; python_version >= \"3.6\"", "pytest (>=4.6.2)"] [[package]] name = "xattr" -version = "1.1.4" +version = "1.2.0" description = "Python wrapper for extended filesystem attributes" optional = false python-versions = ">=3.8" groups = ["dev"] markers = "sys_platform == \"darwin\"" files = [ - {file = "xattr-1.1.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:acb85b6249e9f3ea10cbb56df1021d43f4027212f0d004304bc9075dc7f54769"}, - {file = "xattr-1.1.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1a848ab125c0fafdc501ccd83b4c9018bba576a037a4ca5960a22f39e295552e"}, - {file = "xattr-1.1.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:467ee77471d26ae5187ee7081b82175b5ca56ead4b71467ec2e6119d1b08beed"}, - {file = "xattr-1.1.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fd35f46cb0154f7033f9d5d0960f226857acb0d1e0d71fd7af18ed84663007c"}, - {file = "xattr-1.1.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:7d956478e9bb98a1efd20ebc6e5703497c1d2d690d5a13c4df4abf59881eed50"}, - {file = "xattr-1.1.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f25dfdcd974b700fb04a40e14a664a80227ee58e02ea062ac241f0d7dc54b4e"}, - {file = "xattr-1.1.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:33b63365c1fcbc80a79f601575bac0d6921732e0245b776876f3db3fcfefe22d"}, - {file = "xattr-1.1.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:544542be95c9b49e211f0a463758f200de88ba6d5a94d3c4f42855a484341acd"}, - {file = "xattr-1.1.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac14c9893f3ea046784b7702be30889b200d31adcd2e6781a8a190b6423f9f2d"}, - {file = "xattr-1.1.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bb4bbe37ba95542081890dd34fa5347bef4651e276647adaa802d5d0d7d86452"}, - {file = "xattr-1.1.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3da489ecef798705f9a39ea8cea4ead0d1eeed55f92c345add89740bd930bab6"}, - {file = "xattr-1.1.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:798dd0cbe696635a6f74b06fc430818bf9c3b24314e1502eadf67027ab60c9b0"}, - {file = "xattr-1.1.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b2b6361626efad5eb5a6bf8172c6c67339e09397ee8140ec41258737bea9681"}, - {file = "xattr-1.1.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7fa20a0c9ce022d19123b1c5b848d00a68b837251835a7929fe041ee81dcd0"}, - {file = "xattr-1.1.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e20eeb08e2c57fc7e71f050b1cfae35cbb46105449853a582bf53fd23c5379e"}, - {file = "xattr-1.1.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:477370e75821bded901487e5e752cffe554d1bd3bd4839b627d4d1ee8c95a093"}, - {file = "xattr-1.1.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a8682091cd34a9f4a93c8aaea4101aae99f1506e24da00a3cc3dd2eca9566f21"}, - {file = "xattr-1.1.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2e079b3b1a274ba2121cf0da38bbe5c8d2fb1cc49ecbceb395ce20eb7d69556d"}, - {file = "xattr-1.1.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ae6579dea05bf9f335a082f711d5924a98da563cac72a2d550f5b940c401c0e9"}, - {file = "xattr-1.1.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cd6038ec9df2e67af23c212693751481d5f7e858156924f14340376c48ed9ac7"}, - {file = "xattr-1.1.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:608b2877526674eb15df4150ef4b70b7b292ae00e65aecaae2f192af224be200"}, - {file = "xattr-1.1.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54dad1a6a998c6a23edfd25e99f4d38e9b942d54e518570044edf8c767687ea"}, - {file = "xattr-1.1.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c0dab6ff72bb2b508f3850c368f8e53bd706585012676e1f71debba3310acde8"}, - {file = "xattr-1.1.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a3c54c6af7cf09432b2c461af257d5f4b1cb2d59eee045f91bacef44421a46d"}, - {file = "xattr-1.1.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e346e05a158d554639fbf7a0db169dc693c2d2260c7acb3239448f1ff4a9d67f"}, - {file = "xattr-1.1.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3ff6d9e2103d0d6e5fcd65b85a2005b66ea81c0720a37036445faadc5bbfa424"}, - {file = "xattr-1.1.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:7a2ee4563c6414dfec0d1ac610f59d39d5220531ae06373eeb1a06ee37cd193f"}, - {file = "xattr-1.1.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:878df1b38cfdadf3184ad8c7b0f516311128d5597b60ac0b3486948953658a83"}, - {file = "xattr-1.1.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0c9b8350244a1c5454f93a8d572628ff71d7e2fc2f7480dcf4c4f0e8af3150fe"}, - {file = "xattr-1.1.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a46bf48fb662b8bd745b78bef1074a1e08f41a531168de62b5d7bd331dadb11a"}, - {file = "xattr-1.1.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83fc3c07b583777b1dda6355329f75ca6b7179fe0d1002f1afe0ef96f7e3b5de"}, - {file = "xattr-1.1.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6308b19cff71441513258699f0538394fad5d66e1d324635207a97cb076fd439"}, - {file = "xattr-1.1.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48c00ddc15ddadc9c729cd9504dabf50adb3d9c28f647d4ac9a3df45a046b1a0"}, - {file = "xattr-1.1.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a06136196f26293758e1b244200b73156a0274af9a7349fa201c71c7af3bb9e8"}, - {file = "xattr-1.1.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8fc2631a3c6cfcdc71f7f0f847461839963754e76a2015de71e7e71e3304abc0"}, - {file = "xattr-1.1.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d6e1e835f9c938d129dd45e7eb52ebf7d2d6816323dab93ce311bf331f7d2328"}, - {file = "xattr-1.1.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:60dea2d369a6484e8b7136224fc2971e10e2c46340d83ab780924afe78c90066"}, - {file = "xattr-1.1.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:85c2b778b09d919523f80f244d799a142302582d76da18903dc693207c4020b0"}, - {file = "xattr-1.1.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ee0abba9e1b890d39141714ff43e9666864ca635ea8a5a2194d989e6b17fe862"}, - {file = "xattr-1.1.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e4174ba7f51f46b95ea7918d907c91cd579575d59e6a2f22ca36a0551026737"}, - {file = "xattr-1.1.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2b05e52e99d82d87528c54c2c5c8c5fb0ba435f85ac6545511aeea136e49925"}, - {file = "xattr-1.1.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a3696fad746be37de34eb73c60ea67144162bd08106a5308a90ce9dea9a3287"}, - {file = "xattr-1.1.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a3a7149439a26b68904c14fdc4587cde4ac7d80303e9ff0fefcfd893b698c976"}, - {file = "xattr-1.1.4-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:507b36a126ce900dbfa35d4e2c2db92570c933294cba5d161ecd6a89f7b52f43"}, - {file = "xattr-1.1.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9392b417b54923e031041940d396b1d709df1d3779c6744454e1f1c1f4dad4f5"}, - {file = "xattr-1.1.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e9f00315e6c02943893b77f544776b49c756ac76960bea7cb8d7e1b96aefc284"}, - {file = "xattr-1.1.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c8f98775065260140efb348b1ff8d50fd66ddcbf0c685b76eb1e87b380aaffb3"}, - {file = "xattr-1.1.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b471c6a515f434a167ca16c5c15ff34ee42d11956baa749173a8a4e385ff23e7"}, - {file = "xattr-1.1.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee0763a1b7ceb78ba2f78bee5f30d1551dc26daafcce4ac125115fa1def20519"}, - {file = 
"xattr-1.1.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:099e6e9ce7999b403d36d9cf943105a3d25d8233486b54ec9d1b78623b050433"}, - {file = "xattr-1.1.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3e56faef9dde8d969f0d646fb6171883693f88ae39163ecd919ec707fbafa85"}, - {file = "xattr-1.1.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:328156d4e594c9ae63e1072503c168849e601a153ad37f0290743544332d6b6f"}, - {file = "xattr-1.1.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:a57a55a27c7864d6916344c9a91776afda6c3b8b2209f8a69b79cdba93fbe128"}, - {file = "xattr-1.1.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3c19cdde08b040df1e99d2500bf8a9cff775ab0e6fa162bf8afe6d84aa93ed04"}, - {file = "xattr-1.1.4-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c72667f19d3a9acf324aed97f58861d398d87e42314731e7c6ab3ac7850c971"}, - {file = "xattr-1.1.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:67ae934d75ea2563fc48a27c5945749575c74a6de19fdd38390917ddcb0e4f24"}, - {file = "xattr-1.1.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a1b0c348dd8523554dc535540d2046c0c8a535bb086561d8359f3667967b6ca"}, - {file = "xattr-1.1.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22284255d2a8e8f3da195bd8e8d43ce674dbc7c38d38cb6ecfb37fae7755d31f"}, - {file = "xattr-1.1.4-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b38aac5ef4381c26d3ce147ca98fba5a78b1e5bcd6be6755b4908659f2705c6d"}, - {file = "xattr-1.1.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:803f864af528f6f763a5be1e7b1ccab418e55ae0e4abc8bda961d162f850c991"}, - {file = "xattr-1.1.4-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:40354ebfb5cecd60a5fbb9833a8a452d147486b0ffec547823658556625d98b5"}, - {file = "xattr-1.1.4-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2abaf5d06be3361bfa8e0db2ee123ba8e92beab5bceed5e9d7847f2145a32e04"}, - {file = "xattr-1.1.4-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3e638e5ffedc3565242b5fa3296899d35161bad771f88d66277b58f03a1ba9fe"}, - {file = "xattr-1.1.4-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0597e919d116ec39997804288d77bec3777228368efc0f2294b84a527fc4f9c2"}, - {file = "xattr-1.1.4-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee9455c501d19f065527afda974418b3ef7c61e85d9519d122cd6eb3cb7a00"}, - {file = "xattr-1.1.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:89ed62ce430f5789e15cfc1ccabc172fd8b349c3a17c52d9e6c64ecedf08c265"}, - {file = "xattr-1.1.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e25b824f4b9259cd8bb6e83c4873cf8bf080f6e4fa034a02fe778e07aba8d345"}, - {file = "xattr-1.1.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8fba66faa0016dfc0af3dd7ac5782b5786a1dfb851f9f3455e266f94c2a05a04"}, - {file = "xattr-1.1.4-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ec4b0c3e0a7bcd103f3cf31dd40c349940b2d4223ce43d384a3548992138ef1"}, - {file = "xattr-1.1.4.tar.gz", hash = 
"sha256:b7b02ecb2270da5b7e7deaeea8f8b528c17368401c2b9d5f63e91f545b45d372"}, + {file = "xattr-1.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3df4d8d91e2996c3c72a390ec82e8544acdcb6c7df67b954f1736ff37ea4293e"}, + {file = "xattr-1.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f5eec248976bbfa6c23df25d4995413df57dccf4161f6cbae36f643e99dbc397"}, + {file = "xattr-1.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fafecfdedf7e8d455443bec2c3edab8a93d64672619cd1a4ee043a806152e19c"}, + {file = "xattr-1.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c229e245c6c9a85d2fd7d07531498f837dd34670e556b552f73350f11edf000c"}, + {file = "xattr-1.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:376631e2383918fbc3dc9bcaeb9a533e319322d2cff1c119635849edf74e1126"}, + {file = "xattr-1.2.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbae24ab22afe078d549645501ecacaa17229e0b7769c8418fad69b51ad37c9"}, + {file = "xattr-1.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a161160211081d765ac41fa056f4f9b1051f027f08188730fbc9782d0dce623e"}, + {file = "xattr-1.2.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a542acf6c4e8221664b51b35e0160c44bd0ed1f2fd80019476f7698f4911e560"}, + {file = "xattr-1.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:034f075fc5a9391a1597a6c9a21cb57b688680f0f18ecf73b2efc22b8d330cff"}, + {file = "xattr-1.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:00c26c14c90058338993bb2d3e1cebf562e94ec516cafba64a8f34f74b9d18b4"}, + {file = "xattr-1.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b4f43dc644db87d5eb9484a9518c34a864cb2e588db34cffc42139bf55302a1c"}, + {file = "xattr-1.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c7602583fc643ca76576498e2319c7cef0b72aef1936701678589da6371b731b"}, + {file = "xattr-1.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90c3ad4a9205cceb64ec54616aa90aa42d140c8ae3b9710a0aaa2843a6f1aca7"}, + {file = "xattr-1.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83d87cfe19cd606fc0709d45a4d6efc276900797deced99e239566926a5afedf"}, + {file = "xattr-1.2.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c67dabd9ddc04ead63fbc85aed459c9afcc24abfc5bb3217fff7ec9a466faacb"}, + {file = "xattr-1.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9a18ee82d8ba2c17f1e8414bfeb421fa763e0fb4acbc1e124988ca1584ad32d5"}, + {file = "xattr-1.2.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:38de598c47b85185e745986a061094d2e706e9c2d9022210d2c738066990fe91"}, + {file = "xattr-1.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:15e754e854bdaac366ad3f1c8fbf77f6668e8858266b4246e8c5f487eeaf1179"}, + {file = "xattr-1.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:daff0c1f5c5e4eaf758c56259c4f72631fa9619875e7a25554b6077dc73da964"}, + {file = "xattr-1.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:109b11fb3f73a0d4e199962f11230ab5f462e85a8021874f96c1732aa61148d5"}, + {file = "xattr-1.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7c7c12968ce0bf798d8ba90194cef65de768bee9f51a684e022c74cab4218305"}, + {file = "xattr-1.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d37989dabf25ff18773e4aaeebcb65604b9528f8645f43e02bebaa363e3ae958"}, + {file = "xattr-1.2.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:165de92b0f2adafb336f936931d044619b9840e35ba01079f4dd288747b73714"}, + {file = "xattr-1.2.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82191c006ae4c609b22b9aea5f38f68fff022dc6884c4c0e1dba329effd4b288"}, + {file = "xattr-1.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2b2e9c87dc643b09d86befad218e921f6e65b59a4668d6262b85308de5dbd1dd"}, + {file = "xattr-1.2.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:14edd5d47d0bb92b23222c0bb6379abbddab01fb776b2170758e666035ecf3aa"}, + {file = "xattr-1.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:12183d5eb104d4da787638c7dadf63b718472d92fec6dbe12994ea5d094d7863"}, + {file = "xattr-1.2.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c385ea93a18aeb6443a719eb6a6b1d7f7b143a4d1f2b08bc4fadfc429209e629"}, + {file = "xattr-1.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2d39d7b36842c67ab3040bead7eb6d601e35fa0d6214ed20a43df4ec30b6f9f9"}, + {file = "xattr-1.2.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:320ef856bb817f4c40213b6de956dc440d0f23cdc62da3ea02239eb5147093f8"}, + {file = "xattr-1.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26d306bfb3b5641726f2ee0da6f63a2656aa7fdcfd15de61c476e3ca6bc3277e"}, + {file = "xattr-1.2.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c67e70d5d8136d328ad13f85b887ffa97690422f1a11fb29ab2f702cf66e825a"}, + {file = "xattr-1.2.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8904d3539afe1a84fc0b7f02fa91da60d2505adf2d5951dc855bf9e75fe322b2"}, + {file = "xattr-1.2.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2520516c1d058895eae00b2b2f10833514caea6dc6802eef1e431c474b5317ad"}, + {file = "xattr-1.2.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:29d06abbef4024b7469fcd0d4ade6d2290582350a4df95fcc48fa48b2e83246b"}, + {file = "xattr-1.2.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:093c75f7d9190be355b8e86da3f460b9bfe3d6a176f92852d44dcc3289aa10dc"}, + {file = "xattr-1.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ee3901db48de913dcef004c5d7b477a1f4aadff997445ef62907b10fdad57de"}, + {file = "xattr-1.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b837898a5225c7f7df731783cd78bae2ed81b84bacf020821f1cd2ab2d74de58"}, + {file = "xattr-1.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cedc281811e424ecf6a14208532f7ac646866f91f88e8eadd00d8fe535e505fd"}, + {file = "xattr-1.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf60577caa248f539e4e646090b10d6ad1f54189de9a7f1854c23fdef28f574e"}, + {file = "xattr-1.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:363724f33510d2e7c7e080b389271a1241cb4929a1d9294f89721152b4410972"}, + {file = "xattr-1.2.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97db00596865845efb72f3d565a1f82b01006c5bf5a87d8854a6afac43502593"}, + {file = "xattr-1.2.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0b199ba31078f3e4181578595cd60400ee055b4399672169ceee846d33ff26de"}, + {file = "xattr-1.2.0-cp38-cp38-musllinux_1_2_i686.whl", hash 
= "sha256:b19472dc38150ac09a478c71092738d86882bc9ff687a4a8f7d1a25abce20b5e"}, + {file = "xattr-1.2.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:79f7823b30ed557e0e7ffd9a6b1a821a22f485f5347e54b8d24c4a34b7545ba4"}, + {file = "xattr-1.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8eee258f5774933cb972cff5c3388166374e678980d2a1f417d7d6f61d9ae172"}, + {file = "xattr-1.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2a9de621eadf0466c391363bd6ed903b1a1bcd272422b5183fd06ef79d05347b"}, + {file = "xattr-1.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bc714f236f17c57c510ae9ada9962d8e4efc9f9ea91504e2c6a09008f3918ddf"}, + {file = "xattr-1.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:545e0ad3f706724029efd23dec58fb358422ae68ab4b560b712aedeaf40446a0"}, + {file = "xattr-1.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:200bb3cdba057cb721b727607bc340a74c28274f4a628a26011f574860f5846b"}, + {file = "xattr-1.2.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b0b27c889cc9ff0dba62ac8a2eef98f4911c1621e4e8c409d5beb224c4c227c"}, + {file = "xattr-1.2.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ea7cf8afd717853ad78eba8ca83ff66a53484ba2bb2a4283462bc5c767518174"}, + {file = "xattr-1.2.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:02fa813db054bbb7a61c570ae025bd01c36fc20727b40f49031feb930234bc72"}, + {file = "xattr-1.2.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2827e23d7a1a20f31162c47ab4bd341a31e83421121978c4ab2aad5cd79ea82b"}, + {file = "xattr-1.2.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:29ae44247d46e63671311bf7e700826a97921278e2c0c04c2d11741888db41b8"}, + {file = "xattr-1.2.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:629c42c1dd813442d90f281f69b88ef0c9625f604989bef8411428671f70f43e"}, + {file = "xattr-1.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:549f8fbda5da48cafc81ba6ab7bb8e8e14c4b0748c37963dc504bcae505474b7"}, + {file = "xattr-1.2.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa83e677b5f92a3c5c86eaf875e9d3abbc43887ff1767178def865fa9f12a3a0"}, + {file = "xattr-1.2.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb669f01627962ce2bc556f19d421162247bc2cad0d4625d6ea5eb32af4cf29b"}, + {file = "xattr-1.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:212156aa5fb987a53211606bc09e6fea3eda3855af9f2940e40df5a2a592425a"}, + {file = "xattr-1.2.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:7dc4fa9448a513077c5ccd1ce428ff0682cdddfc71301dbbe4ee385c74517f73"}, + {file = "xattr-1.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e4b93f2e74793b61c0a7b7bdef4a3813930df9c01eda72fad706b8db7658bc2"}, + {file = "xattr-1.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dddd5f6d0bb95b099d6a3888c248bf246525647ccb8cf9e8f0fc3952e012d6fb"}, + {file = "xattr-1.2.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68fbdffebe8c398a82c84ecf5e6f6a3adde9364f891cba066e58352af404a45c"}, + {file = "xattr-1.2.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = 
"sha256:c9ee84de7cd4a6d61b0b79e2f58a6bdb13b03dbad948489ebb0b73a95caee7ae"}, + {file = "xattr-1.2.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5594fcbc38fdbb3af16a8ad18c37c81c8814955f0d636be857a67850cd556490"}, + {file = "xattr-1.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:017aac8005e1e84d5efa4b86c0896c6eb96f2331732d388600a5b999166fec1c"}, + {file = "xattr-1.2.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d27a64f695440450c119ae4bc8f54b0b726a812ebea1666fff3873236936f36"}, + {file = "xattr-1.2.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f7e7067e1a400ad4485536a9e84c3330373086b2324fafa26d07527eeb4b175"}, + {file = "xattr-1.2.0.tar.gz", hash = "sha256:a64c8e21eff1be143accf80fd3b8fde3e28a478c37da298742af647ac3e5e0a7"}, ] [package.dependencies] From d2bfbb0649fcde69759877a201000279de86c077 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 03:11:39 -0400 Subject: [PATCH 09/34] refactor(bot.py): remove command_prefix argument from super().__init__ call The command_prefix argument is removed from the super().__init__ call because it is not necessary to pass it explicitly. The command prefix is managed internally, likely through other means, making the explicit passing redundant and simplifying the initialization process. --- tux/bot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tux/bot.py b/tux/bot.py index 3b1da6238..8396271b0 100644 --- a/tux/bot.py +++ b/tux/bot.py @@ -92,7 +92,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: the sequence of operations for the bot's startup and shutdown routines. It also creates and schedules the main setup task. """ - super().__init__(*args, **kwargs, command_prefix=self._get_prefix) + super().__init__(*args, **kwargs) # Core bot state flags, managed by the BotState dataclass. self.state = BotState() From ceef3d8ff937dfba3baec1f092256256d50fa684 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 03:11:50 -0400 Subject: [PATCH 10/34] refactor(cog_loader.py): load cogs sequentially within priority groups to prevent dependency issues - Updated cog loading logic to process cogs sequentially within each priority group, addressing potential race conditions. - Implemented error handling to skip remaining cogs in a group if one fails to load, preventing cascading failures. - Enhanced documentation to clarify the loading process and its implications on cog dependencies. --- tux/cog_loader.py | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/tux/cog_loader.py b/tux/cog_loader.py index c5e4b5c24..640362233 100644 --- a/tux/cog_loader.py +++ b/tux/cog_loader.py @@ -9,7 +9,6 @@ from __future__ import annotations -import asyncio import time from collections import defaultdict from itertools import groupby @@ -321,6 +320,11 @@ async def _load_cogs_from_directory(self, path: Path) -> list[CogLoadResult]: """ Discovers, groups, and loads all eligible cogs from a directory. + Cogs are loaded by priority groups in descending order. Within each priority + group, cogs are loaded sequentially to prevent race conditions and dependency + issues. If a cog fails to load within a priority group, the remaining cogs + in that group are skipped to prevent cascading failures. 
+ Parameters ---------- path : Path @@ -347,18 +351,30 @@ async def _load_cogs_from_directory(self, path: Path) -> list[CogLoadResult]: set_span_attributes({"cog_count": len(cogs_to_load), "categories": list(categories)}) start_time = time.perf_counter() - results = await asyncio.gather( - *[self._load_single_cog(cog) for cog in cogs_to_load], - return_exceptions=True, - ) - group_results = [result for result in results if isinstance(result, CogLoadResult)] + # Load cogs sequentially within priority group to avoid dependency issues + # This prevents race conditions that could occur if cogs within the same + # priority group depend on each other during import/initialization + group_results: list[CogLoadResult] = [] + for cog in cogs_to_load: + try: + result = await self._load_single_cog(cog) + group_results.append(result) + except CogLoadError as e: + # Create a failed result for tracking + failed_result = self._create_load_result(cog, start_time, success=False, error=e) + group_results.append(failed_result) + # Stop loading remaining cogs in this priority group to prevent + # cascading failures from dependency issues + logger.warning(f"Skipping remaining cogs in priority {priority} due to failure: {e}") + break + all_results.extend(group_results) set_span_attributes( { "load_time_s": time.perf_counter() - start_time, - "success_count": len(group_results), - "failure_count": len(results) - len(group_results), + "success_count": len([r for r in group_results if r.success]), + "failure_count": len([r for r in group_results if not r.success]), }, ) return all_results From 60ca11d5ee2f0358b9ad397e1f009b6399670d4b Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 03:21:47 -0400 Subject: [PATCH 11/34] fix(cog_loader.py): improve error handling for missing cog paths - Changed the warning log to an error log when a cog path is not found. - Raised a FileNotFoundError with a descriptive message if the specified path does not exist. - Added error handling to gracefully manage missing folders during the loading process while logging the error for visibility. --- tux/cog_loader.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tux/cog_loader.py b/tux/cog_loader.py index 640362233..7cacf5c60 100644 --- a/tux/cog_loader.py +++ b/tux/cog_loader.py @@ -396,14 +396,17 @@ async def load_cogs(self, path: Path) -> list[CogLoadResult]: Raises ------ + FileNotFoundError + If the specified path does not exist. CogLoadError If a fatal error occurs during the loading process. 
""" set_span_attributes({"cog.path": str(path)}) if not path.exists(): - logger.warning(f"Cog path not found: {path}") - return [] + logger.error(f"Cog path not found: {path}") + msg = f"Cog path not found: {path}" + raise FileNotFoundError(msg) try: if path.is_dir(): @@ -444,6 +447,11 @@ async def load_cogs_from_folder(self, folder_name: str) -> list[CogLoadResult]: start_time = time.perf_counter() try: results = await self.load_cogs(path=cog_path) + except FileNotFoundError as e: + # Handle missing folders gracefully but log as error for visibility + capture_span_exception(e, folder=folder_name, operation="load_folder") + logger.error(f"Cog folder not found: {folder_name} - {e}") + return [] except CogLoadError as e: capture_span_exception(e, folder=folder_name, operation="load_folder") logger.error(f"Failed to load cogs from folder {folder_name}: {e}") From e2b998efa23aad9d778b7d6d73c5eaff5d599b72 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 03:24:44 -0400 Subject: [PATCH 12/34] refactor(bot.py): enhance command transaction handling in Sentry integration - Updated command transaction logic to create a transaction for every invocation, even when ctx.command is None. - Improved command name retrieval by using a fallback to "unknown_command" if the command is not available. - Streamlined the context setting for SentryManager during command invocations. --- tux/bot.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tux/bot.py b/tux/bot.py index 8396271b0..08d42e866 100644 --- a/tux/bot.py +++ b/tux/bot.py @@ -543,16 +543,16 @@ async def invoke(self, ctx: commands.Context[Any]) -> None: ctx : commands.Context[Any] The context of the command invocation. """ - if not self.sentry_manager.is_initialized or not ctx.command: + if not self.sentry_manager.is_initialized: await super().invoke(ctx) return - # Create a transaction for the command + # Create a transaction for every invocation, even if ctx.command is None + command_name = getattr(ctx.command, "qualified_name", None) or "unknown_command" op = "command" - name = ctx.command.qualified_name description = ctx.message.content - with start_transaction(op, name, description): + with start_transaction(op, command_name, description): # Set comprehensive context using the SentryManager self.sentry_manager.set_command_context(ctx) From 3772808016a7329cca79e5c64bc1af1952d2fbb6 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 03:42:31 -0400 Subject: [PATCH 13/34] fix(influxdblogger.py): improve logging and task management for InfluxDB integration - Added an info log statement to confirm successful initialization of the InfluxDB logger. - Updated warning log to indicate task stopping when the InfluxDB writer is not initialized. - Ensured the logger stops if InfluxDB is not configured, preventing unnecessary task execution. --- tux/cogs/services/influxdblogger.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tux/cogs/services/influxdblogger.py b/tux/cogs/services/influxdblogger.py index fada085b5..a4044b016 100644 --- a/tux/cogs/services/influxdblogger.py +++ b/tux/cogs/services/influxdblogger.py @@ -20,8 +20,11 @@ def __init__(self, bot: Tux): if self.init_influx(): self.logger.start() + logger.info("InfluxDB logger initialized successfully") else: logger.warning("InfluxDB logger failed to init. 
Check .env configuration if you want to use it.") + # Don't start the task if InfluxDB is not configured + # The cog will remain loaded but the task won't run def init_influx(self) -> bool: """Initialize InfluxDB client for metrics logging. @@ -49,7 +52,8 @@ async def logger(self) -> None: Collects data from various database models and writes metrics to InfluxDB. """ if not self.influx_write_api: - logger.warning("InfluxDB writer not initialized, skipping metrics collection") + logger.warning("InfluxDB writer not initialized, stopping task") + self.logger.stop() return influx_bucket = "tux stats" From 381716854aa5a488c72b30c5687fefe16d26b559 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 03:42:37 -0400 Subject: [PATCH 14/34] refactor(task_manager.py): enhance task management with cog unloading support - Updated task registration to occur after cogs are loaded, ensuring tasks are only registered when their corresponding cogs exist. - Added methods to unregister critical tasks and clean up tasks associated with unloaded cogs, improving resource management and logging for task cleanup. --- tux/utils/task_manager.py | 41 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/tux/utils/task_manager.py b/tux/utils/task_manager.py index 24b5f0d0e..15d0dc80d 100644 --- a/tux/utils/task_manager.py +++ b/tux/utils/task_manager.py @@ -141,9 +141,8 @@ def __init__(self, bot: BotProtocol) -> None: self.last_health_check: float = 0.0 self.health_check_interval: float = 300.0 # 5 minutes - # Register default critical tasks - for task_config in self.DEFAULT_CRITICAL_TASKS: - self.register_critical_task(task_config) + # Note: Critical tasks are now registered after cogs are loaded + # to ensure cogs exist before registering their tasks def setup_task_instrumentation(self) -> None: """ @@ -212,6 +211,42 @@ def register_critical_task(self, config: CriticalTaskConfig) -> None: ) logger.debug(f"Registered critical task: {config.name}") + def unregister_critical_task(self, task_name: str) -> None: + """ + Unregister a critical task when its cog is unloaded. + + Parameters + ---------- + task_name : str + The name of the task to unregister. + """ + if task_name in self.critical_tasks: + del self.critical_tasks[task_name] + logger.debug(f"Unregistered critical task: {task_name}") + + if task_name in self.task_metrics: + del self.task_metrics[task_name] + logger.debug(f"Removed metrics for task: {task_name}") + + def cleanup_cog_tasks(self, cog_name: str) -> None: + """ + Clean up all critical tasks associated with a specific cog. + + Parameters + ---------- + cog_name : str + The name of the cog that was unloaded. + """ + tasks_to_remove = [ + task_name for task_name, config in self.critical_tasks.items() if config.cog_name == cog_name + ] + + for task_name in tasks_to_remove: + self.unregister_critical_task(task_name) + + if tasks_to_remove: + logger.info(f"Cleaned up {len(tasks_to_remove)} critical tasks for unloaded cog: {cog_name}") + def get_task_health(self, task_name: str) -> TaskHealth | None: """ Get health status for a specific task. From f13c30b63c302afb9e5870a35c960ebe41628fef Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 03:42:45 -0400 Subject: [PATCH 15/34] refactor(bot.py): add critical task registration and cog unloading cleanup - Introduced a new method to register critical tasks after cogs are loaded, ensuring tasks are only registered for existing cogs. 
- Enhanced the cog unloading process to clean up associated critical tasks, preventing orphaned task references. - Updated the remove_cog method to ensure proper cleanup of tasks when a cog is removed. - Improved logging for task registration and cleanup operations to enhance visibility and debugging. --- tux/bot.py | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/tux/bot.py b/tux/bot.py index 08d42e866..783d1b391 100644 --- a/tux/bot.py +++ b/tux/bot.py @@ -19,6 +19,7 @@ from __future__ import annotations import asyncio +import collections.abc import contextlib from collections.abc import Callable, Coroutine from dataclasses import dataclass @@ -113,6 +114,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: ("jishaku", self._load_jishaku), ("cogs", self._load_cogs), ("hot_reload", self._setup_hot_reload), + ("register_tasks", self._register_critical_tasks), ("monitoring", self.task_manager.start), ("instrument_tasks", self.task_manager.setup_task_instrumentation), ("instrument_commands", lambda: instrument_bot_commands(self)), @@ -330,6 +332,94 @@ async def _setup_hot_reload(self) -> None: logger.error(f"Failed to load hot reload extension: {e}") self.sentry_manager.capture_exception(e) + async def _register_critical_tasks(self) -> None: + """ + Registers critical tasks after cogs are loaded. + + This method validates that cogs exist before registering their critical tasks, + ensuring that task registration only happens for cogs that are actually loaded. + """ + with start_span("bot.register_critical_tasks", "Registering critical tasks") as span: + logger.info("Registering critical tasks...") + + try: + # Clear any existing critical tasks to avoid duplicates + self.task_manager.critical_tasks.clear() + self.task_manager.task_metrics.clear() + + # Register critical tasks only for cogs that exist + for task_config in self.task_manager.DEFAULT_CRITICAL_TASKS: + if task_config.cog_name in self.cogs: + self.task_manager.register_critical_task(task_config) + logger.debug(f"Registered critical task: {task_config.name} for cog: {task_config.cog_name}") + else: + logger.warning(f"Cog {task_config.cog_name} not found, skipping task: {task_config.name}") + + span.set_tag("tasks_registered", len(self.task_manager.critical_tasks)) + logger.info(f"Registered {len(self.task_manager.critical_tasks)} critical tasks.") + + except Exception as e: + logger.critical(f"Failed to register critical tasks: {e}") + self.sentry_manager.capture_exception(e) + raise + + def _handle_cog_unload(self, cog_name: str) -> None: + """ + Handle cleanup when a cog is unloaded. + + This method cleans up any critical tasks associated with the unloaded cog + to prevent orphaned task references. + + Parameters + ---------- + cog_name : str + The name of the cog that was unloaded. + """ + logger.debug(f"Handling unload for cog: {cog_name}") + self.task_manager.cleanup_cog_tasks(cog_name) + + async def remove_cog( + self, + name: str, + /, + *, + guild: discord.abc.Snowflake | None = None, + guilds: collections.abc.Sequence[discord.abc.Snowflake] | None = None, + ) -> commands.Cog | None: + """ + Remove a cog and clean up associated tasks. + + This overrides the default remove_cog method to ensure that critical tasks + associated with the unloaded cog are properly cleaned up when the cog is unloaded. + + Parameters + ---------- + name : str + The name of the cog to remove. 
+ guild : discord.abc.Snowflake | None, optional + The guild to remove the cog from, by default None + guilds : collections.abc.Sequence[discord.abc.Snowflake] | None, optional + The guilds to remove the cog from, by default None + + Returns + ------- + commands.Cog | None + The removed cog, or None if it wasn't loaded. + """ + # Remove the cog using the parent method + if guilds is not None: + removed_cog = await super().remove_cog(name, guild=guild, guilds=guilds) + elif guild is not None: + removed_cog = await super().remove_cog(name, guild=guild) + else: + removed_cog = await super().remove_cog(name) + + # Clean up associated tasks if the cog was successfully removed + if removed_cog is not None: + self._handle_cog_unload(name) + + return removed_cog + async def _handle_setup_task(self) -> None: """ Handles the main setup task during shutdown. From 51ba4d4da8fa46b8e50d6e84977140aced526eea Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 17:07:21 -0400 Subject: [PATCH 16/34] feat(utils): consolidate Sentry SDK usage behind SentryManager abstraction - Add comprehensive SentryManager class with all SDK functionality - Implement tracing and span management methods - Add user and command context setting capabilities - Use if expressions for cleaner code (Sourcery suggestions) - Centralize all Sentry interactions for consistent error reporting --- tux/utils/sentry_manager.py | 89 +++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/tux/utils/sentry_manager.py b/tux/utils/sentry_manager.py index 7fef4807d..ed4ae055f 100644 --- a/tux/utils/sentry_manager.py +++ b/tux/utils/sentry_manager.py @@ -616,3 +616,92 @@ def set_command_context(self, ctx: ContextOrInteraction) -> None: # Set command context using standardized data self.set_context("command", context_data) + + # --- Tracing and Span Management --- + + def get_current_span(self) -> Any | None: + """ + Get the current active span from Sentry. + + Returns + ------- + Any | None + The current span if Sentry is initialized and a span is active, None otherwise. + """ + return sentry_sdk.get_current_span() if self.is_initialized else None + + def start_transaction(self, op: str, name: str, description: str = "") -> Any: + """ + Start a new Sentry transaction. + + Parameters + ---------- + op : str + The operation name for the transaction. + name : str + The name of the transaction. + description : str, optional + A description of the transaction. + + Returns + ------- + Any + The started transaction object. + """ + return ( + sentry_sdk.start_transaction( + op=op, + name=name, + description=description, + ) + if self.is_initialized + else None + ) + + def start_span(self, op: str, description: str = "") -> Any: + """ + Start a new Sentry span. + + Parameters + ---------- + op : str + The operation name for the span. + description : str, optional + A description of the span. + + Returns + ------- + Any + The started span object. + """ + return sentry_sdk.start_span(op=op, description=description) if self.is_initialized else None + + def add_breadcrumb( + self, + message: str, + category: str = "default", + level: LogLevelStr = "info", + data: dict[str, Any] | None = None, + ) -> None: + """ + Add a breadcrumb to the current Sentry scope. + + Parameters + ---------- + message : str + The breadcrumb message. + category : str, optional + The breadcrumb category. + level : LogLevelStr, optional + The breadcrumb level. + data : dict[str, Any] | None, optional + Additional data for the breadcrumb. 
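+
+        Examples
+        --------
+        A hypothetical call (the message, category, and level values are
+        illustrative only):
+
+        >>> bot.sentry_manager.add_breadcrumb(
+        ...     "Cog loaded",
+        ...     category="lifecycle",
+        ...     level="info",
+        ... )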
+ """ + if not self.is_initialized: + return + sentry_sdk.add_breadcrumb( + message=message, + category=category, + level=level, + data=data, + ) From 5efa11dc34a6bd92ccc55b853135eabf2324e94c Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 17:07:27 -0400 Subject: [PATCH 17/34] refactor(utils): redesign task manager for dynamic cog-driven task registration - Remove hardcoded DEFAULT_CRITICAL_TASKS configuration - Add discover_and_register_cog_tasks() method for dynamic task discovery - Implement cog-driven task registration via get_critical_tasks() method - Improve task registration documentation and error handling - Make task management more flexible and maintainable --- tux/utils/task_manager.py | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/tux/utils/task_manager.py b/tux/utils/task_manager.py index 15d0dc80d..eab98218c 100644 --- a/tux/utils/task_manager.py +++ b/tux/utils/task_manager.py @@ -111,16 +111,6 @@ class TaskManager: ("patch_asyncio",): TaskCategory.SYSTEM, } - # Default critical tasks that should be monitored - DEFAULT_CRITICAL_TASKS: ClassVar[list[CriticalTaskConfig]] = [ - CriticalTaskConfig("reminder_processor", "ReminderService", "reminder_processor", TaskPriority.CRITICAL), - CriticalTaskConfig("tempban_checker", "TempBan", "check_tempbans", TaskPriority.HIGH), - CriticalTaskConfig("afk_expiration_handler", "Afk", "handle_afk_expiration", TaskPriority.NORMAL), - CriticalTaskConfig("old_gif_remover", "GifLimiter", "old_gif_remover", TaskPriority.NORMAL), - CriticalTaskConfig("influx_guild_stats", "InfluxLogger", "_log_guild_stats", TaskPriority.LOW), - CriticalTaskConfig("influx_db_logger", "InfluxLogger", "logger", TaskPriority.LOW), - ] - def __init__(self, bot: BotProtocol) -> None: """ Initialize the TaskManager with enhanced monitoring capabilities. @@ -201,7 +191,7 @@ def register_critical_task(self, config: CriticalTaskConfig) -> None: Parameters ---------- config : CriticalTaskConfig - Configuration for the critical task. + Configuration for the critical task to register. """ self.critical_tasks[config.name] = config self.task_metrics[config.name] = TaskMetrics( @@ -211,6 +201,28 @@ def register_critical_task(self, config: CriticalTaskConfig) -> None: ) logger.debug(f"Registered critical task: {config.name}") + def discover_and_register_cog_tasks(self) -> None: + """ + Discover and register critical tasks from all loaded cogs. + + This method asks each cog if it has critical tasks to register, + making the system dynamic and cog-driven instead of hardcoded. + """ + logger.info("Discovering critical tasks from cogs...") + + for cog_name, cog in self.bot.cogs.items(): + # Check if the cog has a method to report its critical tasks + get_tasks_method = getattr(cog, "get_critical_tasks", None) + if get_tasks_method and callable(get_tasks_method): + try: + if task_configs := get_tasks_method(): + for config in task_configs: + self.register_critical_task(config) + logger.debug(f"Discovered task {config.name} from cog {cog_name}") + except Exception as e: + logger.warning(f"Error discovering tasks from cog {cog_name}: {e}") + continue + def unregister_critical_task(self, task_name: str) -> None: """ Unregister a critical task when its cog is unloaded. 
From 25365f502b3954f218e3cb6ef7406437e936860c Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 17:07:32 -0400 Subject: [PATCH 18/34] feat(utils): enhance BotProtocol with runtime checking and add_cog method - Add @runtime_checkable decorator for runtime protocol checking - Add add_cog method to BotProtocol for hot reload compatibility - Make sentry_manager required (not optional) to match Tux implementation - Improve type safety for dependency injection and Union types --- tux/utils/protocols.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/tux/utils/protocols.py b/tux/utils/protocols.py index 203083fe9..9499d3a6d 100644 --- a/tux/utils/protocols.py +++ b/tux/utils/protocols.py @@ -15,7 +15,8 @@ from __future__ import annotations from collections.abc import Mapping -from typing import TYPE_CHECKING, Protocol +from types import ModuleType +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable if TYPE_CHECKING: from discord.ext import commands @@ -23,10 +24,20 @@ from tux.utils.sentry_manager import SentryManager +@runtime_checkable class BotProtocol(Protocol): """A protocol for the bot instance to provide necessary attributes.""" @property def cogs(self) -> Mapping[str, commands.Cog]: ... + @property + def extensions(self) -> Mapping[str, ModuleType]: ... + + help_command: Any + sentry_manager: SentryManager + + async def load_extension(self, name: str) -> None: ... + async def reload_extension(self, name: str) -> None: ... + async def add_cog(self, cog: commands.Cog, /, *, override: bool = False) -> None: ... From a55c50cf95c0f694cce9d33727d712e720b54ae3 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 17:08:03 -0400 Subject: [PATCH 19/34] refactor(utils): refactor hot reload to use SentryManager and improve type safety - Remove direct sentry_sdk usage, use SentryManager abstraction - Add Union type support for bot parameter (commands.Bot | BotProtocol) - Use runtime protocol checking with isinstance() for sentry_manager access - Merge nested if statements for cleaner code (linter suggestions) - Improve error handling and type safety throughout hot reload system --- tux/utils/hot_reload.py | 215 ++++++++++++++++++++-------------------- 1 file changed, 106 insertions(+), 109 deletions(-) diff --git a/tux/utils/hot_reload.py b/tux/utils/hot_reload.py index 299934393..2be629e33 100644 --- a/tux/utils/hot_reload.py +++ b/tux/utils/hot_reload.py @@ -5,6 +5,8 @@ with comprehensive error handling and performance monitoring. """ +from __future__ import annotations + import ast import asyncio import hashlib @@ -14,37 +16,24 @@ import sys import time from abc import ABC, abstractmethod -from collections.abc import Callable, Mapping, Sequence +from collections.abc import Callable, Sequence from contextlib import contextmanager, suppress from dataclasses import dataclass, field from pathlib import Path -from types import ModuleType from typing import Any, Protocol, TypeVar, cast -import sentry_sdk import watchdog.events import watchdog.observers from discord.ext import commands from loguru import logger +from tux.utils.protocols import BotProtocol from tux.utils.tracing import span # Type variables and protocols F = TypeVar("F", bound=Callable[..., Any]) -class BotProtocol(Protocol): - """Protocol for bot-like objects.""" - - @property - def extensions(self) -> Mapping[str, ModuleType]: ... - - help_command: Any - - async def load_extension(self, name: str) -> None: ... - async def reload_extension(self, name: str) -> None: ... 
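The local protocol above is dropped in favor of the shared, runtime-checkable BotProtocol from tux.utils.protocols. Worth noting: isinstance() against a @runtime_checkable Protocol verifies only that the named members exist, not that their signatures match. A toy, self-contained sketch of that behavior (illustrative classes, not the real Tux types):

from typing import Protocol, runtime_checkable


@runtime_checkable
class ExtensionHost(Protocol):
    """Toy stand-in for BotProtocol; names are illustrative only."""

    async def load_extension(self, name: str) -> None: ...

    async def reload_extension(self, name: str) -> None: ...


class FakeBot:
    async def load_extension(self, name: str) -> None:
        print(f"loaded {name}")

    async def reload_extension(self, name: str) -> None:
        print(f"reloaded {name}")


class NotABot: ...


assert isinstance(FakeBot(), ExtensionHost)  # structural match: passes
assert not isinstance(NotABot(), ExtensionHost)  # missing members: fails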
- - class FileSystemWatcherProtocol(Protocol): """Protocol for file system watchers.""" @@ -260,9 +249,8 @@ def reload_module_by_name(module_name: str) -> bool: importlib.reload(sys.modules[module_name]) except Exception as e: logger.error(f"Failed to reload module {module_name}: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) - return False + # No sentry_manager available here, so no Sentry capture + raise else: logger.debug(f"Reloaded module {module_name}") return True @@ -382,9 +370,8 @@ def scan_class_definitions(self, file_path: Path, module_name: str) -> dict[str, except Exception as e: logger.debug(f"Error scanning class definitions in {file_path}: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) - return {} + # No sentry_manager available here, so no Sentry capture + raise else: return classes @@ -458,9 +445,8 @@ def scan_dependencies(self, file_path: Path) -> set[str]: except Exception as e: logger.debug(f"Error scanning dependencies in {file_path}: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) - return set() + # No sentry_manager available here, so no Sentry capture + raise else: return dependencies @@ -698,9 +684,8 @@ def hot_patch_class(self, module_name: str, class_name: str, new_class: type) -> setattr(module, class_name, new_class) except Exception as e: logger.error(f"Failed to hot patch class {class_name} in {module_name}: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) - return False + # No sentry_manager available here, so no Sentry capture + raise else: logger.info(f"Hot patched class {class_name} in {module_name}") return True @@ -717,30 +702,47 @@ def cleanup_context(self): class CogWatcher(watchdog.events.FileSystemEventHandler): - """Enhanced cog watcher with smart dependency tracking and improved error handling.""" - - def __init__(self, bot: BotProtocol, path: str, *, recursive: bool = True, config: HotReloadConfig | None = None): - """Initialize the cog watcher with validation.""" - self._config = config or HotReloadConfig() - validate_config(self._config) + """ + Watches for file changes and automatically reloads affected cogs. - watch_path = Path(path) - if not watch_path.exists(): - msg = f"Watch path does not exist: {path}" - raise FileWatchError(msg) + This class extends watchdog's FileSystemEventHandler to monitor Python files + in the bot's directory and trigger automatic reloading when changes are detected. + """ + def __init__( + self, + bot: commands.Bot | BotProtocol, + path: str, + *, + recursive: bool = True, + config: HotReloadConfig | None = None, + ): + """ + Initialize the cog watcher. + + Parameters + ---------- + bot : commands.Bot | BotProtocol + The bot instance to reload extensions on. + path : str + The directory path to watch for changes. 
+ recursive : bool, optional + Whether to watch subdirectories recursively, by default True + config : HotReloadConfig | None, optional + Configuration for the hot reload system, by default None + """ + super().__init__() self.bot = bot - self.path = str(watch_path.resolve()) + self.watch_path = Path(path) self.recursive = recursive + self.config = config or HotReloadConfig() + self.extension_map: dict[str, str] = {} + self.dependency_graph = DependencyGraph(self.config) + self.file_hash_tracker = FileHashTracker() + self.class_tracker = ClassDefinitionTracker() + self.debounce_timers: dict[str, asyncio.TimerHandle] = {} self.observer = watchdog.observers.Observer() - self.observer.schedule(self, self.path, recursive=recursive) - self.base_dir = Path(__file__).parent.parent - - # Store a relative path for logging - try: - self.display_path = str(Path(path).relative_to(self.base_dir.parent)) - except ValueError: - self.display_path = path + self.pending_tasks: list[asyncio.Task[None]] = [] # Store the main event loop from the calling thread try: @@ -749,23 +751,15 @@ def __init__(self, bot: BotProtocol, path: str, *, recursive: bool = True, confi msg = "Hot reload must be initialized from within an async context" raise HotReloadError(msg) from e - # Track special files + # Set up base directory and help file path + self.base_dir = Path(__file__).parent.parent self.help_file_path = self.base_dir / "help.py" - # Extension tracking - self.path_to_extension: dict[str, str] = {} - self.pending_tasks: list[asyncio.Task[None]] = [] - - # Enhanced dependency tracking - self.dependency_graph = DependencyGraph(self._config) - - # Debouncing configuration - self._debounce_timers: dict[str, asyncio.Handle] = {} - - # Build initial extension map + # Build the extension map and populate file hashes self._build_extension_map() - - logger.debug(f"CogWatcher initialized for path: {self.display_path}") + if self.config.prepopulate_hashes: + cached_count = self._populate_all_file_hashes() + logger.debug(f"Pre-populated {cached_count} file hashes") @span("watcher.build_extension_map") def _build_extension_map(self) -> None: @@ -779,15 +773,15 @@ def _build_extension_map(self) -> None: try: path = path_from_extension(extension) if path.exists(): - self.path_to_extension[str(path)] = extension + self.extension_map[str(path)] = extension self.dependency_graph.update_dependencies(path, extension) extension_count += 1 else: logger.warning(f"Could not find file for extension {extension}, expected at {path}") except Exception as e: logger.error(f"Error processing extension {extension}: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + # No sentry_manager available here, so no Sentry capture + raise # Pre-populate hash cache for all Python files in watched directories # This eliminates "first encounter" issues for any file @@ -802,16 +796,16 @@ def _populate_all_file_hashes(self) -> int: Pre-populate hash cache for all files in watched directories matching configured extensions. This can be disabled via configuration to avoid startup overhead. 
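Why pre-populate at all: a content-hash tracker otherwise treats the first sighting of every file as a change, so the first edit after startup could cascade into spurious reloads. A rough sketch of the mechanism (an illustrative HashCache, not the actual FileHashTracker API):

import hashlib
from pathlib import Path


class HashCache:
    """Illustrative content-hash cache; not the real FileHashTracker API."""

    def __init__(self) -> None:
        self._hashes: dict[str, str] = {}

    def has_changed(self, path: Path) -> bool:
        new = hashlib.sha256(path.read_bytes()).hexdigest()
        old = self._hashes.get(str(path))
        self._hashes[str(path)] = new
        # A file never seen before compares as "changed" -- hence pre-population.
        return old != new


cache = HashCache()
for py_file in Path(".").rglob("*.py"):
    cache.has_changed(py_file)  # warm the cache so startup state reads as unchanged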
""" - if not self._config.prepopulate_hashes: + if not self.config.prepopulate_hashes: logger.debug("Hash pre-population disabled in configuration") return 0 cached_count = 0 # Get the root watch path (this includes the entire tux directory) - watch_root = Path(self.path) + watch_root = Path(self.watch_path) - for ext in self._config.hash_extensions: + for ext in self.config.hash_extensions: for file_path in watch_root.rglob(f"*{ext}"): try: # Pre-populate cache silently using the public method @@ -819,6 +813,8 @@ def _populate_all_file_hashes(self) -> int: cached_count += 1 except Exception as e: logger.warning(f"Failed to hash {file_path}: {e}") + # No sentry_manager available here, so no Sentry capture + raise return cached_count @@ -826,7 +822,7 @@ def start(self) -> None: """Start watching for file changes.""" try: self.observer.start() - logger.info(f"Hot reload watching {self.display_path}") + logger.info(f"Hot reload watching {self.watch_path}") except Exception as e: msg = f"Failed to start file watcher: {e}" raise FileWatchError(msg) from e @@ -847,9 +843,9 @@ def stop(self) -> None: task.cancel() # Cancel debounce timers - for timer in self._debounce_timers.values(): + for timer in self.debounce_timers.values(): timer.cancel() - self._debounce_timers.clear() + self.debounce_timers.clear() logger.info("Stopped watching for changes") @@ -875,13 +871,13 @@ def on_modified(self, event: watchdog.events.FileSystemEvent) -> None: file_key = str(file_path) # Cancel existing debounce timer if any - if file_key in self._debounce_timers: - self._debounce_timers[file_key].cancel() + if file_key in self.debounce_timers: + self.debounce_timers[file_key].cancel() # Set new debounce timer try: - self._debounce_timers[file_key] = self.loop.call_later( - self._config.debounce_delay, + self.debounce_timers[file_key] = self.loop.call_later( + self.config.debounce_delay, self._handle_file_change_debounced, file_path, ) @@ -901,11 +897,11 @@ def _handle_file_change_debounced(self, file_path: Path) -> None: file_key = str(file_path) # Remove from debounce tracking - if file_key in self._debounce_timers: - del self._debounce_timers[file_key] + if file_key in self.debounce_timers: + del self.debounce_timers[file_key] # Validate syntax before attempting reload (if enabled) - if self._config.validate_syntax and file_path.suffix == ".py" and not validate_python_syntax(file_path): + if self.config.validate_syntax and file_path.suffix == ".py" and not validate_python_syntax(file_path): logger.debug(f"Skipping hot reload for {file_path.name} due to syntax errors") return @@ -918,8 +914,8 @@ def _handle_file_change_debounced(self, file_path: Path) -> None: self._handle_extension_file(file_path) except Exception as e: logger.error(f"Error handling file change for {file_path}: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + # No sentry_manager available here, so no Sentry capture + raise def _handle_special_files(self, file_path: Path) -> bool: """Handle special files like help.py and __init__.py.""" @@ -943,7 +939,7 @@ def _handle_extension_file(self, file_path: Path) -> None: self.dependency_graph.update_dependencies(file_path, module_name) # Check direct mapping first - if extension := self.path_to_extension.get(str(file_path)): + if extension := self.extension_map.get(str(file_path)): self._reload_extension(extension) return @@ -1014,7 +1010,7 @@ def _process_extension_reload(self, extension: str, file_path: Path | None = Non self._reload_extension(extension) if file_path: - 
self.path_to_extension[str(file_path)] = extension + self.extension_map[str(file_path)] = extension @span("watcher.try_reload_variations") def _try_reload_extension_variations(self, extension: str, file_path: Path) -> bool: @@ -1030,7 +1026,7 @@ def _try_reload_extension_variations(self, extension: str, file_path: Path) -> b shorter_ext = ".".join(parts[:i]) if shorter_ext in self.bot.extensions: logger.warning(f"Skipping reload of {extension} as parent module {shorter_ext} already loaded") - self.path_to_extension[str(file_path)] = shorter_ext + self.extension_map[str(file_path)] = shorter_ext return True # Check parent modules @@ -1074,8 +1070,8 @@ def _handle_init_file_change(self, init_file_path: Path) -> None: self._process_extension_reload(ext) except Exception as e: logger.error(f"Error handling __init__.py change for {init_file_path}: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + # No sentry_manager available here, so no Sentry capture + raise def _collect_extensions_to_reload(self, full_package: str, short_package: str) -> list[str]: """Collect extensions that need to be reloaded based on package names.""" @@ -1098,8 +1094,8 @@ def _reload_extension(self, extension: str) -> None: asyncio.run_coroutine_threadsafe(self._async_reload_extension(extension), self.loop) except Exception as e: logger.error(f"Failed to schedule reload of extension {extension}: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + # No sentry_manager available here, so no Sentry capture + raise def _reload_help(self) -> None: """Reload the help command with proper error handling.""" @@ -1108,8 +1104,8 @@ def _reload_help(self) -> None: asyncio.run_coroutine_threadsafe(self._async_reload_help(), self.loop) except Exception as e: logger.error(f"Failed to schedule reload of help command: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + # No sentry_manager available here, so no Sentry capture + raise @span("reload.extension") async def _async_reload_extension(self, extension: str) -> None: @@ -1162,12 +1158,13 @@ async def _handle_extension_not_loaded(self, extension: str) -> None: # Update our mapping path = path_from_extension(extension) - self.path_to_extension[str(path)] = extension + self.extension_map[str(path)] = extension except commands.ExtensionError as e: logger.error(f"❌ Failed to load new extension {extension}: {e}") - # Only send to Sentry if it's not a common development error - if sentry_sdk.is_initialized() and not self._is_development_error(e): - sentry_sdk.capture_exception(e) + # Only send to Sentry if it's not a common development error and bot supports it + if not self._is_development_error(e) and isinstance(self.bot, BotProtocol): + self.bot.sentry_manager.capture_exception(e) + raise async def _reload_extension_core(self, extension: str) -> None: """Core extension reloading logic.""" @@ -1178,9 +1175,9 @@ async def _reload_extension_core(self, extension: str) -> None: raise except commands.ExtensionError as e: logger.error(f"❌ Failed to reload extension {extension}: {e}") - # Only send to Sentry if it's not a common development error - if sentry_sdk.is_initialized() and not self._is_development_error(e): - sentry_sdk.capture_exception(e) + # Only send to Sentry if it's not a common development error and bot supports it + if not self._is_development_error(e) and isinstance(self.bot, BotProtocol): + self.bot.sentry_manager.capture_exception(e) raise @span("reload.help") @@ -1203,12 +1200,12 @@ async def 
_async_reload_help(self) -> None: logger.info("✅ Reloaded help command") except (AttributeError, ImportError) as e: logger.error(f"Error accessing TuxHelp class: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + # No sentry_manager available here, so no Sentry capture + raise except Exception as e: logger.error(f"❌ Failed to reload help command: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + # No sentry_manager available here, so no Sentry capture + raise @span("reload.flag_dependent_cogs") def _reload_flag_class_dependent_cogs(self) -> None: @@ -1414,8 +1411,8 @@ async def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: logger.info("🔥 Hot reload active") except Exception as e: logger.error(f"Failed to start hot reload system: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + # No sentry_manager available here, so no Sentry capture + raise return result @@ -1463,9 +1460,8 @@ def auto_discover_cogs(path: str = "cogs") -> list[str]: continue except Exception as e: logger.error(f"Error during cog discovery: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) - return [] + # No sentry_manager available here, so no Sentry capture + raise else: return sorted(discovered) @@ -1473,7 +1469,7 @@ def auto_discover_cogs(path: str = "cogs") -> list[str]: class HotReload(commands.Cog): """Hot reload cog for backward compatibility and direct usage.""" - def __init__(self, bot: commands.Bot) -> None: + def __init__(self, bot: commands.Bot | BotProtocol) -> None: self.bot = bot logger.debug(f"Initializing HotReload cog with {len(bot.extensions)} loaded extensions") @@ -1485,8 +1481,9 @@ def __init__(self, bot: commands.Bot) -> None: self.watcher.start() except Exception as e: logger.error(f"Failed to initialize hot reload watcher: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + # Type-safe access to sentry_manager + if isinstance(bot, BotProtocol): + bot.sentry_manager.capture_exception(e) raise async def cog_unload(self) -> None: @@ -1499,7 +1496,7 @@ async def cog_unload(self) -> None: logger.error(f"Error during HotReload cog unload: {e}") -async def setup(bot: commands.Bot) -> None: +async def setup(bot: commands.Bot | BotProtocol) -> None: """Set up the hot reload cog.""" logger.info("Setting up hot reloader") logger.debug(f"Bot has {len(bot.extensions)} extensions loaded") @@ -1511,11 +1508,11 @@ async def setup(bot: commands.Bot) -> None: logger.warning(f" - {issue}") try: + # The actual bot instance will have the required attributes await bot.add_cog(HotReload(bot)) except Exception as e: logger.error(f"Failed to setup hot reload cog: {e}") - if sentry_sdk.is_initialized(): - sentry_sdk.capture_exception(e) + # No sentry_manager available here, so no Sentry capture raise From 270b4e19b41bd13ade9ef92b9496f11694b33c90 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 17:08:12 -0400 Subject: [PATCH 20/34] refactor(bot): update bot to use dynamic task discovery and remove hardcoded checks - Replace hardcoded task registration with dynamic discovery - Remove _is_cog_functional method in favor of cog-driven approach - Use task_manager.discover_and_register_cog_tasks() for flexible task registration - Improve separation of concerns between bot and cogs - Make task management more maintainable and extensible --- tux/bot.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/tux/bot.py b/tux/bot.py index 783d1b391..5f328874e 
100644 --- a/tux/bot.py +++ b/tux/bot.py @@ -102,7 +102,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self._startup_task: asyncio.Task[None] | None = None # Sub-systems and managers that encapsulate specific functionalities. - self.sentry_manager = SentryManager() + self.sentry_manager: SentryManager = SentryManager() self.emoji_manager = EmojiManager(self) self.task_manager = TaskManager(self) self.console = Console(stderr=True, force_terminal=True) @@ -336,8 +336,8 @@ async def _register_critical_tasks(self) -> None: """ Registers critical tasks after cogs are loaded. - This method validates that cogs exist before registering their critical tasks, - ensuring that task registration only happens for cogs that are actually loaded. + This method uses dynamic discovery to find critical tasks from cogs, + making the system more flexible and cog-driven. """ with start_span("bot.register_critical_tasks", "Registering critical tasks") as span: logger.info("Registering critical tasks...") @@ -347,13 +347,8 @@ async def _register_critical_tasks(self) -> None: self.task_manager.critical_tasks.clear() self.task_manager.task_metrics.clear() - # Register critical tasks only for cogs that exist - for task_config in self.task_manager.DEFAULT_CRITICAL_TASKS: - if task_config.cog_name in self.cogs: - self.task_manager.register_critical_task(task_config) - logger.debug(f"Registered critical task: {task_config.name} for cog: {task_config.cog_name}") - else: - logger.warning(f"Cog {task_config.cog_name} not found, skipping task: {task_config.name}") + # Discover and register tasks from cogs dynamically + self.task_manager.discover_and_register_cog_tasks() span.set_tag("tasks_registered", len(self.task_manager.critical_tasks)) logger.info(f"Registered {len(self.task_manager.critical_tasks)} critical tasks.") From 93b9d4543152c7310552d794096339d1caa1443f Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 17:08:18 -0400 Subject: [PATCH 21/34] refactor(handlers): update sentry handler to use SentryManager abstraction - Replace direct sentry_sdk calls with SentryManager methods - Use set_tag and finish_transaction_on_error from SentryManager - Improve consistency with centralized error reporting approach - Maintain same functionality while using abstraction layer --- tux/handlers/sentry.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tux/handlers/sentry.py b/tux/handlers/sentry.py index 476cdb14e..5a4c0dbf9 100644 --- a/tux/handlers/sentry.py +++ b/tux/handlers/sentry.py @@ -3,7 +3,6 @@ from typing import Any import discord -import sentry_sdk from discord.ext import commands from loguru import logger @@ -47,7 +46,7 @@ async def on_command_completion(self, ctx: commands.Context[Tux]) -> None: ctx : commands.Context[Tux] The command context """ - if self.sentry_manager.is_initialized and (span := sentry_sdk.get_current_span()): + if span := self.sentry_manager.get_current_span(): span.set_status(self.sentry_manager.STATUS["OK"]) logger.trace(f"Set Sentry span status to 'ok' for command: {ctx.command}") @@ -63,7 +62,7 @@ async def on_app_command_completion(self, interaction: discord.Interaction, comm command : CommandObject The command that was completed """ - if self.sentry_manager.is_initialized and (span := sentry_sdk.get_current_span()): + if span := self.sentry_manager.get_current_span(): span.set_status(self.sentry_manager.STATUS["OK"]) logger.trace(f"Set Sentry span status to 'ok' for app command: {command.name}") From 7d833e637835fdc5147e68b3ad81168eebff9ae8 
Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 17:08:24 -0400 Subject: [PATCH 22/34] feat(cogs): add get_critical_tasks method to InfluxLogger for dynamic task registration - Replace is_functional method with get_critical_tasks for new task manager - Only register influx_db_logger task when InfluxDB is properly configured - Add proper imports for CriticalTaskConfig and TaskPriority - Improve task registration to be conditional on cog functionality - Prevent task registration when InfluxDB environment variables are missing --- tux/cogs/services/influxdblogger.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tux/cogs/services/influxdblogger.py b/tux/cogs/services/influxdblogger.py index a4044b016..2dff08582 100644 --- a/tux/cogs/services/influxdblogger.py +++ b/tux/cogs/services/influxdblogger.py @@ -9,6 +9,7 @@ from tux.bot import Tux from tux.database.controllers import DatabaseController from tux.utils.config import CONFIG +from tux.utils.task_manager import CriticalTaskConfig, TaskPriority class InfluxLogger(commands.Cog): @@ -45,6 +46,19 @@ def init_influx(self) -> bool: return True return False + def get_critical_tasks(self) -> list[CriticalTaskConfig]: + """Get critical tasks for this cog. + + Returns + ------- + list[CriticalTaskConfig] + List of critical task configurations, or empty list if not functional + """ + # Only register the task if InfluxDB is properly configured + if self.influx_write_api is not None: + return [CriticalTaskConfig("influx_db_logger", "InfluxLogger", "logger", TaskPriority.LOW)] + return [] + @tasks.loop(seconds=60) async def logger(self) -> None: """Log statistics to InfluxDB at regular intervals. From b4d7d909d0eafeab917f595cb9fd1971a2a7d464 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 17:08:31 -0400 Subject: [PATCH 23/34] feat(cogs): add get_critical_tasks method to GifLimiter for dynamic task registration - Add get_critical_tasks method to register old_gif_remover task - Import CriticalTaskConfig and TaskPriority from task_manager - Enable automatic task registration and monitoring for GIF cleanup - Follow new cog-driven task management pattern --- tux/cogs/services/gif_limiter.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tux/cogs/services/gif_limiter.py b/tux/cogs/services/gif_limiter.py index b9f7a694d..6c57970f2 100644 --- a/tux/cogs/services/gif_limiter.py +++ b/tux/cogs/services/gif_limiter.py @@ -7,6 +7,7 @@ from tux.bot import Tux from tux.utils.config import CONFIG +from tux.utils.task_manager import CriticalTaskConfig, TaskPriority class GifLimiter(commands.Cog): @@ -147,6 +148,16 @@ async def cog_unload(self) -> None: """Cancel the background task when the cog is unloaded.""" self.old_gif_remover.cancel() + def get_critical_tasks(self) -> list[CriticalTaskConfig]: + """Get critical tasks for this cog. 
+ + Returns + ------- + list[CriticalTaskConfig] + List of critical task configurations + """ + return [CriticalTaskConfig("old_gif_remover", "GifLimiter", "old_gif_remover", TaskPriority.NORMAL)] + async def setup(bot: Tux) -> None: await bot.add_cog(GifLimiter(bot)) From 11c5d908d24c6564ad6d154094b5e538fa6aedf1 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 17:08:40 -0400 Subject: [PATCH 24/34] feat(cogs): add get_critical_tasks method to TempBan for dynamic task registration - Add get_critical_tasks method to register tempban_check task - Import CriticalTaskConfig and TaskPriority from task_manager - Enable automatic task registration and monitoring for tempban expiration - Follow new cog-driven task management pattern for moderation tasks --- tux/cogs/moderation/tempban.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tux/cogs/moderation/tempban.py b/tux/cogs/moderation/tempban.py index 4641de854..3f18b67e7 100644 --- a/tux/cogs/moderation/tempban.py +++ b/tux/cogs/moderation/tempban.py @@ -10,6 +10,7 @@ from tux.utils import checks from tux.utils.flags import TempBanFlags from tux.utils.functions import generate_usage +from tux.utils.task_manager import CriticalTaskConfig, TaskPriority from . import ModerationCogBase @@ -200,6 +201,16 @@ async def cog_unload(self) -> None: """Cancel the tempban check loop when the cog is unloaded.""" self.tempban_check.cancel() + def get_critical_tasks(self) -> list[CriticalTaskConfig]: + """Get critical tasks for this cog. + + Returns + ------- + list[CriticalTaskConfig] + List of critical task configurations + """ + return [CriticalTaskConfig("tempban_checker", "TempBan", "tempban_check", TaskPriority.HIGH)] + async def setup(bot: Tux) -> None: await bot.add_cog(TempBan(bot)) From 4394b407524f233cdef694221ad7e5bb90cdfabb Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 17:08:46 -0400 Subject: [PATCH 25/34] feat(cogs): add get_critical_tasks method to Afk for dynamic task registration - Add get_critical_tasks method to register handle_afk_expiration task - Import CriticalTaskConfig and TaskPriority from task_manager - Enable automatic task registration and monitoring for AFK expiration - Follow new cog-driven task management pattern for utility tasks --- tux/cogs/utility/afk.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tux/cogs/utility/afk.py b/tux/cogs/utility/afk.py index bafaec050..2a72b2364 100644 --- a/tux/cogs/utility/afk.py +++ b/tux/cogs/utility/afk.py @@ -12,6 +12,7 @@ from tux.cogs.utility import add_afk, del_afk from tux.database.controllers import DatabaseController from tux.utils.functions import generate_usage +from tux.utils.task_manager import CriticalTaskConfig, TaskPriority # TODO: add `afk until` command, or add support for providing a timeframe in the regular `afk` and `permafk` commands @@ -220,6 +221,16 @@ async def _get_expired_afk_entries(self, guild_id: int) -> list[AFKModel]: return [entry for entry in entries if entry.until is not None and entry.until < current_time] + def get_critical_tasks(self) -> list[CriticalTaskConfig]: + """Get critical tasks for this cog. 
+ + Returns + ------- + list[CriticalTaskConfig] + List of critical task configurations + """ + return [CriticalTaskConfig("afk_expiration_handler", "Afk", "handle_afk_expiration", TaskPriority.NORMAL)] + async def setup(bot: Tux) -> None: await bot.add_cog(Afk(bot)) From b22618d4f6581f2d858cd3db0aeeebd417481b3e Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 19:11:59 -0400 Subject: [PATCH 26/34] fix(utils): resolve type errors in task manager with proper Protocol and type annotations - Add CriticalTasksProvider Protocol with @runtime_checkable decorator - Use isinstance() check with Protocol instead of getattr() for type safety - Add proper type annotations for task discovery method - Fix pyright type checking errors for task configuration handling - Improve type safety for cog-driven task registration --- tux/utils/task_manager.py | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/tux/utils/task_manager.py b/tux/utils/task_manager.py index eab98218c..420964482 100644 --- a/tux/utils/task_manager.py +++ b/tux/utils/task_manager.py @@ -24,7 +24,7 @@ from collections.abc import Callable, Coroutine from dataclasses import dataclass, field from enum import Enum, auto -from typing import Any, ClassVar, NamedTuple, cast +from typing import Any, ClassVar, NamedTuple, Protocol, cast, runtime_checkable from discord.ext import tasks from loguru import logger @@ -93,6 +93,13 @@ class CriticalTaskConfig: health_check_interval: float = 300.0 # 5 minutes +@runtime_checkable +class CriticalTasksProvider(Protocol): + """Protocol for cogs that provide critical tasks.""" + + def get_critical_tasks(self) -> list[CriticalTaskConfig]: ... + + class TaskManager: """ Enhanced task manager with health monitoring, metrics, and recovery capabilities. @@ -205,20 +212,17 @@ def discover_and_register_cog_tasks(self) -> None: """ Discover and register critical tasks from all loaded cogs. - This method asks each cog if it has critical tasks to register, - making the system dynamic and cog-driven instead of hardcoded. + This method iterates through all loaded cogs and looks for a + `get_critical_tasks` method. If found, it calls the method to + get a list of CriticalTaskConfig objects and registers them. 
""" - logger.info("Discovering critical tasks from cogs...") - for cog_name, cog in self.bot.cogs.items(): - # Check if the cog has a method to report its critical tasks - get_tasks_method = getattr(cog, "get_critical_tasks", None) - if get_tasks_method and callable(get_tasks_method): + if isinstance(cog, CriticalTasksProvider): try: - if task_configs := get_tasks_method(): - for config in task_configs: - self.register_critical_task(config) - logger.debug(f"Discovered task {config.name} from cog {cog_name}") + task_configs = cog.get_critical_tasks() + for config in task_configs: + self.register_critical_task(config) + logger.debug(f"Discovered task {config.name} from cog {cog_name}") except Exception as e: logger.warning(f"Error discovering tasks from cog {cog_name}: {e}") continue From b216cee080d480a962334f9c90ef13bc2fcd512b Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 19:14:22 -0400 Subject: [PATCH 27/34] feat(cli): add check-all command for comprehensive development validation - Add check-all command that runs both pre-commit and type checking - Ensures local development catches the same issues as CI - Provides single command for complete code validation - Helps prevent type errors from reaching GitHub Actions --- tux/cli/dev.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tux/cli/dev.py b/tux/cli/dev.py index 96ad5a46d..4296ef6b9 100644 --- a/tux/cli/dev.py +++ b/tux/cli/dev.py @@ -38,3 +38,15 @@ def type_check() -> int: def check() -> int: """Run pre-commit checks.""" return run_command(["pre-commit", "run", "--all-files"]) + + +@command_registration_decorator(dev_group, name="check-all") +def check_all() -> int: + """Run all development checks (pre-commit + type checking).""" + # Run pre-commit first + pre_commit_result = run_command(["pre-commit", "run", "--all-files"]) + if pre_commit_result != 0: + return pre_commit_result + + # Then run type checking + return run_command(["pyright"]) From e46b4a37d4e5342a314e19bae9a7b01e82737e49 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Sun, 20 Jul 2025 19:14:22 -0400 Subject: [PATCH 28/34] feat(cli): add check-all command for comprehensive development validation - Add check-all command that runs both pre-commit and type checking - Ensures local development catches the same issues as CI - Provides single command for complete code validation - Helps prevent type errors from reaching GitHub Actions --- tux/bot.py | 109 ++++++++++++-------- tux/handlers/sentry.py | 192 +++++++++++++++++++++++++++++++----- tux/utils/hot_reload.py | 1 + tux/utils/sentry_manager.py | 36 +++++-- tux/utils/tracing.py | 73 ++++++++++++++ 5 files changed, 338 insertions(+), 73 deletions(-) diff --git a/tux/bot.py b/tux/bot.py index 5f328874e..c32d4f7ae 100644 --- a/tux/bot.py +++ b/tux/bot.py @@ -39,7 +39,13 @@ from tux.utils.env import is_dev_mode from tux.utils.sentry_manager import SentryManager from tux.utils.task_manager import TaskManager -from tux.utils.tracing import instrument_bot_commands, start_span, start_transaction +from tux.utils.tracing import ( + instrument_bot_commands, + set_setup_phase_tag, + set_span_error, + start_span, + start_transaction, +) # Type hint for discord.ext.tasks.Loop type TaskLoop = tasks.Loop[Callable[[], Coroutine[Any, Any, None]]] @@ -107,24 +113,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.task_manager = TaskManager(self) self.console = Console(stderr=True, force_terminal=True) - # Bot lifecycle routines are defined as lists of (name, function) tuples. 
- # This makes the setup and shutdown sequences clear and easy to modify. - self.setup_steps = [ - ("database", self._setup_database), - ("jishaku", self._load_jishaku), - ("cogs", self._load_cogs), - ("hot_reload", self._setup_hot_reload), - ("register_tasks", self._register_critical_tasks), - ("monitoring", self.task_manager.start), - ("instrument_tasks", self.task_manager.setup_task_instrumentation), - ("instrument_commands", lambda: instrument_bot_commands(self)), - ] - - self.shutdown_steps = [ - ("handle_setup_task", self._handle_setup_task), - ("cleanup_tasks", self.task_manager.cancel_all_tasks), - ("close_connections", self._close_connections), - ] + # Bot lifecycle routines are now inlined directly in setup() and shutdown() methods + # for better readability and explicit sequencing # The main setup routine is started as a background task immediately. logger.debug("Creating bot setup task") @@ -137,10 +127,9 @@ async def setup(self) -> None: """ Executes the bot's startup routine in a defined sequence. - This method iterates through the `setup_steps` list, awaiting each - asynchronous setup method to ensure the bot is properly initialized - before it goes online. If any step fails, the setup is aborted, and - a graceful shutdown is triggered. + This method performs each setup step in order to ensure the bot is properly + initialized before it goes online. If any step fails, the setup is aborted, + and a graceful shutdown is triggered. Raises ------ @@ -150,13 +139,45 @@ async def setup(self) -> None: """ try: with start_span("bot.setup", "Bot setup process") as span: - for name, step_func in self.setup_steps: - span.set_tag("setup_phase", f"{name}_starting") - if asyncio.iscoroutinefunction(step_func): - await step_func() - else: - step_func() - span.set_tag("setup_phase", f"{name}_finished") + # Database connection + set_setup_phase_tag(span, "database", "starting") + await self._setup_database() + set_setup_phase_tag(span, "database", "finished") + + # Load jishaku extension + set_setup_phase_tag(span, "jishaku", "starting") + await self._load_jishaku() + set_setup_phase_tag(span, "jishaku", "finished") + + # Load all cogs + set_setup_phase_tag(span, "cogs", "starting") + await self._load_cogs() + set_setup_phase_tag(span, "cogs", "finished") + + # Setup hot reload + set_setup_phase_tag(span, "hot_reload", "starting") + await self._setup_hot_reload() + set_setup_phase_tag(span, "hot_reload", "finished") + + # Register critical tasks + set_setup_phase_tag(span, "register_tasks", "starting") + await self._register_critical_tasks() + set_setup_phase_tag(span, "register_tasks", "finished") + + # Start monitoring + set_setup_phase_tag(span, "monitoring", "starting") + self.task_manager.start() + set_setup_phase_tag(span, "monitoring", "finished") + + # Setup task instrumentation + set_setup_phase_tag(span, "instrument_tasks", "starting") + self.task_manager.setup_task_instrumentation() + set_setup_phase_tag(span, "instrument_tasks", "finished") + + # Setup command instrumentation + set_setup_phase_tag(span, "instrument_commands", "starting") + instrument_bot_commands(self) + set_setup_phase_tag(span, "instrument_commands", "finished") except Exception as e: # If any part of the setup fails, log the critical error @@ -189,11 +210,20 @@ async def shutdown(self) -> None: transaction.set_tag("shutdown_initiated", True) logger.info("Shutting down...") - # Iterate through the defined shutdown steps. 
- for name, step_func in self.shutdown_steps: - transaction.set_tag(f"{name}_handled", False) - await step_func() - transaction.set_tag(f"{name}_handled", True) + # Handle setup task cleanup + transaction.set_tag("handle_setup_task_handled", False) + await self._handle_setup_task() + transaction.set_tag("handle_setup_task_handled", True) + + # Cancel all tasks + transaction.set_tag("cleanup_tasks_handled", False) + await self.task_manager.cancel_all_tasks() + transaction.set_tag("cleanup_tasks_handled", True) + + # Close connections + transaction.set_tag("close_connections_handled", False) + await self._close_connections() + transaction.set_tag("close_connections_handled", True) logger.info("Bot shutdown complete.") @@ -268,8 +298,7 @@ async def _setup_database(self) -> None: logger.info(f"Database models registered: {db.is_registered()}") except Exception as e: - span.set_status("internal_error") - span.set_data("error", str(e)) + set_span_error(span, e, "db_error") raise async def _load_jishaku(self) -> None: @@ -306,7 +335,7 @@ async def _load_cogs(self) -> None: except Exception as e: logger.critical(f"Error loading cogs: {e}") span.set_tag("cogs_loaded", False) - span.set_data("error", str(e)) + set_span_error(span, e, "error") self.sentry_manager.capture_exception(e) raise @@ -446,7 +475,7 @@ async def _close_discord(self) -> None: except Exception as e: logger.error(f"Error during Discord shutdown: {e}") span.set_tag("discord_closed", False) - span.set_data("discord_error", str(e)) + set_span_error(span, e, "discord_error") self.sentry_manager.capture_exception(e) async def _close_database(self) -> None: @@ -465,7 +494,7 @@ async def _close_database(self) -> None: except Exception as e: logger.critical(f"Error during database disconnection: {e}") span.set_tag("db_closed", False) - span.set_data("db_error", str(e)) + set_span_error(span, e, "db_error") self.sentry_manager.capture_exception(e) # --- Internal Helpers --- diff --git a/tux/handlers/sentry.py b/tux/handlers/sentry.py index 5a4c0dbf9..abd945fab 100644 --- a/tux/handlers/sentry.py +++ b/tux/handlers/sentry.py @@ -1,13 +1,12 @@ -from __future__ import annotations - -from typing import Any +from typing import Any, ClassVar import discord +import sentry_sdk from discord.ext import commands from loguru import logger from tux.bot import Tux -from tux.utils.sentry_manager import SentryManager +from tux.utils.tracing import capture_span_exception, set_span_attributes, set_span_status # Type alias using PEP695 syntax type CommandObject = ( @@ -17,56 +16,195 @@ class SentryHandler(commands.Cog): """ - Handles Sentry transaction status for completed commands. + Handles Sentry error tracking and status management for commands and interactions. - This cog listens for command completion events to set the Sentry - transaction status to 'ok', complementing the error handler which - sets failure statuses. + This cog works with the automatic instrumentation from tracing.py to provide + proper error handling and status management for both prefix commands and slash commands. + It does not create transactions manually, as that is handled by the automatic + instrumentation system. """ - def __init__(self, sentry_manager: SentryManager) -> None: - """ - Initialize the Sentry handler cog. 
+ # Standard Sentry transaction statuses with ClassVar + # See: https://develop.sentry.dev/sdk/event-payloads/transaction/#transaction-status + STATUS: ClassVar[dict[str, str]] = { + "OK": "ok", + "UNKNOWN": "unknown", + "ERROR": "internal_error", + "NOT_FOUND": "not_found", + "PERMISSION_DENIED": "permission_denied", + "INVALID_ARGUMENT": "invalid_argument", + "RESOURCE_EXHAUSTED": "resource_exhausted", + "UNAUTHENTICATED": "unauthenticated", + "CANCELLED": "cancelled", + } + + def __init__(self, bot: Tux) -> None: + """Initialize the Sentry handler cog. Parameters ---------- - sentry_manager : SentryManager - The Sentry manager instance. + bot : Tux + The bot instance to attach the listeners to """ - self.sentry_manager = sentry_manager + self.bot = bot logger.info("Sentry handler initialized") + def _is_sentry_available(self) -> bool: + """Check if Sentry is initialized and available for use. + + Returns + ------- + bool + True if Sentry is initialized, False otherwise + """ + return sentry_sdk.is_initialized() + + def _set_command_context(self, ctx: commands.Context[Tux] | discord.Interaction, command_name: str) -> None: + """Set command context on the current Sentry span. + + Parameters + ---------- + ctx : Union[commands.Context[Tux], discord.Interaction] + The command context or interaction + command_name : str + The name of the command being executed + """ + if not self._is_sentry_available(): + return + + if self._is_sentry_available(): + # Set command-specific tags + if isinstance(ctx, commands.Context): + set_span_attributes( + { + "discord.command.name": command_name, + "discord.guild.id": str(ctx.guild.id) if ctx.guild else "DM", + "discord.channel.id": ctx.channel.id, + "discord.user.id": ctx.author.id, + "discord.message.id": ctx.message.id, + "discord.command.type": "prefix", + }, + ) + else: # discord.Interaction + set_span_attributes( + { + "discord.command.name": command_name, + "discord.guild.id": str(ctx.guild_id) if ctx.guild_id else "DM", + "discord.channel.id": ctx.channel_id, + "discord.user.id": ctx.user.id, + "discord.interaction.id": ctx.id, + "discord.interaction.type": ctx.type.name, + "discord.command.type": "slash", + }, + ) + @commands.Cog.listener() - async def on_command_completion(self, ctx: commands.Context[Tux]) -> None: + async def on_command(self, ctx: commands.Context[Tux]) -> None: """ - Sets the Sentry transaction status to 'ok' for a completed prefix command. + Set context for a prefix command execution. + + This works with the automatic instrumentation to add command-specific + context to the existing transaction. Parameters ---------- ctx : commands.Context[Tux] The command context """ - if span := self.sentry_manager.get_current_span(): - span.set_status(self.sentry_manager.STATUS["OK"]) - logger.trace(f"Set Sentry span status to 'ok' for command: {ctx.command}") + if command_name := (ctx.command.qualified_name if ctx.command else "Unknown Command"): + self._set_command_context(ctx, command_name) + logger.trace(f"Set context for prefix command: {command_name}") @commands.Cog.listener() - async def on_app_command_completion(self, interaction: discord.Interaction, command: CommandObject) -> None: + async def on_command_error(self, ctx: commands.Context[Tux], error: commands.CommandError) -> None: """ - Sets the Sentry transaction status to 'ok' for a completed application command. + Handle errors for prefix commands. + + This captures command errors and sets the appropriate status on the + current transaction. 
+ + Parameters + ---------- + ctx : commands.Context[Tux] + The command context + error : commands.CommandError + The error that occurred + """ + if not self._is_sentry_available(): + return + + # Capture the error in the current span + capture_span_exception(error, command_name=ctx.command.qualified_name if ctx.command else "Unknown") + + # Set appropriate status based on error type + if isinstance(error, commands.CommandNotFound): + set_span_status("NOT_FOUND") + elif isinstance(error, commands.MissingPermissions): + set_span_status("PERMISSION_DENIED") + elif isinstance(error, commands.BadArgument): + set_span_status("INVALID_ARGUMENT") + else: + set_span_status("ERROR") + + logger.debug(f"Captured error for prefix command: {error}") + + @commands.Cog.listener() + async def on_interaction(self, interaction: discord.Interaction) -> None: + """ + Set context for application command interactions. + + This works with the automatic instrumentation to add command-specific + context to the existing transaction. + + Parameters + ---------- + interaction : discord.Interaction + The interaction object + """ + if interaction.type != discord.InteractionType.application_command: + return + + if command_name := (interaction.command.qualified_name if interaction.command else "Unknown App Command"): + self._set_command_context(interaction, command_name) + logger.trace(f"Set context for app command: {command_name}") + + @commands.Cog.listener() + async def on_app_command_error( + self, + interaction: discord.Interaction, + error: discord.app_commands.AppCommandError, + ) -> None: + """ + Handle errors for application commands. + + This captures command errors and sets the appropriate status on the + current transaction. Parameters ---------- interaction : discord.Interaction The interaction object - command : CommandObject - The command that was completed + error : discord.app_commands.AppCommandError + The error that occurred """ - if span := self.sentry_manager.get_current_span(): - span.set_status(self.sentry_manager.STATUS["OK"]) - logger.trace(f"Set Sentry span status to 'ok' for app command: {command.name}") + if not self._is_sentry_available(): + return + + # Capture the error in the current span + command_name = interaction.command.qualified_name if interaction.command else "Unknown" + capture_span_exception(error, command_name=command_name) + + # Set appropriate status based on error type + if isinstance(error, discord.app_commands.CommandNotFound): + set_span_status("NOT_FOUND") + elif isinstance(error, discord.app_commands.MissingPermissions): + set_span_status("PERMISSION_DENIED") + else: + set_span_status("ERROR") + + logger.debug(f"Captured error for app command: {error}") async def setup(bot: Tux) -> None: """Add the SentryHandler cog to the bot.""" - await bot.add_cog(SentryHandler(bot.sentry_manager)) + await bot.add_cog(SentryHandler(bot)) diff --git a/tux/utils/hot_reload.py b/tux/utils/hot_reload.py index 2be629e33..1d6cc5d8e 100644 --- a/tux/utils/hot_reload.py +++ b/tux/utils/hot_reload.py @@ -821,6 +821,7 @@ def _populate_all_file_hashes(self) -> int: def start(self) -> None: """Start watching for file changes.""" try: + self.observer.schedule(self, str(self.watch_path), recursive=self.recursive) self.observer.start() logger.info(f"Hot reload watching {self.watch_path}") except Exception as e: diff --git a/tux/utils/sentry_manager.py b/tux/utils/sentry_manager.py index ed4ae055f..5ac3ba917 100644 --- a/tux/utils/sentry_manager.py +++ b/tux/utils/sentry_manager.py @@ -344,6 +344,25 
@@ def setup() -> None: except Exception as e: logger.error(f"Failed to initialize Sentry: {e}") + @staticmethod + def _set_signal_scope_tags(scope: Any, signum: int) -> None: + """Set signal-related tags on a Sentry scope. + + Parameters + ---------- + scope : Any + The Sentry scope to modify + signum : int + The signal number + """ + tags = { + "signal.number": signum, + "lifecycle.event": "termination_signal", + } + + for key, value in tags.items(): + scope.set_tag(key, value) + @staticmethod def report_signal(signum: int, _frame: FrameType | None) -> None: """ @@ -363,8 +382,7 @@ def report_signal(signum: int, _frame: FrameType | None) -> None: """ if sentry_sdk.is_initialized(): with sentry_sdk.push_scope() as scope: - scope.set_tag("signal.number", signum) - scope.set_tag("lifecycle.event", "termination_signal") + SentryManager._set_signal_scope_tags(scope, signum) sentry_sdk.add_breadcrumb( category="lifecycle", message=f"Received termination signal {signum}", @@ -537,10 +555,16 @@ def _set_scope_context(self, scope: Any, context: dict[str, Any]) -> None: """ scope.set_user({"id": context.get("user_id"), "username": context.get("user_name")}) scope.set_context("discord", context) - scope.set_tag("command_name", context.get("command_name", "Unknown")) - scope.set_tag("command_type", context.get("command_type", "Unknown")) - guild_id = context.get("guild_id") - scope.set_tag("guild_id", str(guild_id) if guild_id else "DM") + + # Set tags using a dictionary to avoid repetitive set_tag calls + tags = { + "command_name": context.get("command_name", "Unknown"), + "command_type": context.get("command_type", "Unknown"), + "guild_id": str(context.get("guild_id")) if context.get("guild_id") else "DM", + } + + for key, value in tags.items(): + scope.set_tag(key, value) def set_user_context(self, user: discord.User | discord.Member) -> None: """ diff --git a/tux/utils/tracing.py b/tux/utils/tracing.py index 86ca2c27a..b34a0f5de 100644 --- a/tux/utils/tracing.py +++ b/tux/utils/tracing.py @@ -237,6 +237,10 @@ def transaction( """ def decorator(func: Callable[P, R]) -> Callable[P, R]: + # Early return if Sentry is not initialized to avoid wrapper overhead + if not sentry_sdk.is_initialized(): + return func + transaction_name = name or f"{func.__module__}.{func.__qualname__}" transaction_description = description or f"Executing {func.__qualname__}" @@ -274,6 +278,10 @@ def span(op: str, description: str | None = None) -> Callable[[Callable[P, R]], """ def decorator(func: Callable[P, R]) -> Callable[P, R]: + # Early return if Sentry is not initialized to avoid wrapper overhead + if not sentry_sdk.is_initialized(): + return func + span_description = description or f"Executing {func.__qualname__}" def context_factory() -> Any: @@ -422,6 +430,71 @@ def set_span_attributes(attributes: dict[str, Any]) -> None: span.set_tag(key, value) +def set_span_status(status: str, status_map: dict[str, str] | None = None) -> None: + """ + Set status on the current span. + + Parameters + ---------- + status : str + The status to set (e.g., "OK", "ERROR", "NOT_FOUND") + status_map : dict[str, str] | None, optional + A mapping of status keys to Sentry status values. If None, uses default mapping. 
+ """ + if not sentry_sdk.is_initialized(): + return + + if span := sentry_sdk.get_current_span(): + # Default status mapping if none provided + if status_map is None: + status_map = { + "OK": "ok", + "UNKNOWN": "unknown", + "ERROR": "internal_error", + "NOT_FOUND": "not_found", + "PERMISSION_DENIED": "permission_denied", + "INVALID_ARGUMENT": "invalid_argument", + "RESOURCE_EXHAUSTED": "resource_exhausted", + "UNAUTHENTICATED": "unauthenticated", + "CANCELLED": "cancelled", + } + + span.set_status(status_map.get(status, status)) + + +def set_setup_phase_tag(span: Any, phase: str, status: str = "starting") -> None: + """ + Set a setup phase tag on the span. + + Parameters + ---------- + span : Any + The Sentry span to tag + phase : str + The phase name (e.g., "database", "cogs") + status : str + The status ("starting" or "finished") + """ + span.set_tag("setup_phase", f"{phase}_{status}") + + +def set_span_error(span: Any, error: Exception, error_type: str = "error") -> None: + """ + Set error information on a span with consistent patterns. + + Parameters + ---------- + span : Any + The Sentry span to set error data on + error : Exception + The exception that occurred + error_type : str + The type of error (e.g., "error", "discord_error", "db_error") + """ + span.set_status("internal_error") + span.set_data(error_type, str(error)) + + def capture_span_exception(exception: Exception, **extra_data: Any) -> None: """ Capture an exception in the current span with consistent error handling. From 3440841b51a33e5df5a7514febcf7d9dcfed8609 Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Mon, 21 Jul 2025 01:02:21 -0400 Subject: [PATCH 29/34] refactor(handlers): streamline command-specific tag setting in SentryHandler - Consolidate command-specific tag setting logic for better readability - Maintain functionality for both commands.Context and discord.Interaction types - Improve code clarity by reducing redundancy in attribute setting --- tux/handlers/sentry.py | 49 +++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 25 deletions(-) diff --git a/tux/handlers/sentry.py b/tux/handlers/sentry.py index abd945fab..9618fb597 100644 --- a/tux/handlers/sentry.py +++ b/tux/handlers/sentry.py @@ -72,31 +72,30 @@ def _set_command_context(self, ctx: commands.Context[Tux] | discord.Interaction, if not self._is_sentry_available(): return - if self._is_sentry_available(): - # Set command-specific tags - if isinstance(ctx, commands.Context): - set_span_attributes( - { - "discord.command.name": command_name, - "discord.guild.id": str(ctx.guild.id) if ctx.guild else "DM", - "discord.channel.id": ctx.channel.id, - "discord.user.id": ctx.author.id, - "discord.message.id": ctx.message.id, - "discord.command.type": "prefix", - }, - ) - else: # discord.Interaction - set_span_attributes( - { - "discord.command.name": command_name, - "discord.guild.id": str(ctx.guild_id) if ctx.guild_id else "DM", - "discord.channel.id": ctx.channel_id, - "discord.user.id": ctx.user.id, - "discord.interaction.id": ctx.id, - "discord.interaction.type": ctx.type.name, - "discord.command.type": "slash", - }, - ) + # Set command-specific tags + if isinstance(ctx, commands.Context): + set_span_attributes( + { + "discord.command.name": command_name, + "discord.guild.id": str(ctx.guild.id) if ctx.guild else "DM", + "discord.channel.id": ctx.channel.id, + "discord.user.id": ctx.author.id, + "discord.message.id": ctx.message.id, + "discord.command.type": "prefix", + }, + ) + else: # discord.Interaction + set_span_attributes( + { 
From cf6a7bbae3abe52bba669bf0363e62d86edc117f Mon Sep 17 00:00:00 2001
From: kzndotsh
Date: Mon, 21 Jul 2025 01:02:27 -0400
Subject: [PATCH 30/34] feat(utils): validate configuration in CogWatcher for improved reliability

- Add configuration validation in CogWatcher to ensure proper setup
- Enhance robustness by rejecting misconfigurations at construction time
- Maintain existing functionality while improving error handling

---
 tux/utils/hot_reload.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tux/utils/hot_reload.py b/tux/utils/hot_reload.py
index 1d6cc5d8e..a113a28ba 100644
--- a/tux/utils/hot_reload.py
+++ b/tux/utils/hot_reload.py
@@ -736,6 +736,7 @@ def __init__(
         self.watch_path = Path(path)
         self.recursive = recursive
         self.config = config or HotReloadConfig()
+        validate_config(self.config)
         self.extension_map: dict[str, str] = {}
         self.dependency_graph = DependencyGraph(self.config)
         self.file_hash_tracker = FileHashTracker()

From fc16eab8b0d28e7e73f3ed478c7f7399b420651c Mon Sep 17 00:00:00 2001
From: kzndotsh
Date: Mon, 21 Jul 2025 01:02:33 -0400
Subject: [PATCH 31/34] feat(utils): add asynchronous flush method to SentryManager

- Introduce flush_async method to allow non-blocking flush of pending Sentry events
- Utilize asyncio to prevent blocking the event loop during shutdown
- Enhance performance and responsiveness in applications using SentryManager

---
 tux/utils/sentry_manager.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/tux/utils/sentry_manager.py b/tux/utils/sentry_manager.py
index 5ac3ba917..7e319ad38 100644
--- a/tux/utils/sentry_manager.py
+++ b/tux/utils/sentry_manager.py
@@ -16,6 +16,7 @@
 
 from __future__ import annotations
 
+import asyncio
 from types import FrameType
 from typing import Any, ClassVar, Literal, cast
 
@@ -401,6 +402,18 @@ def flush() -> None:
         if sentry_sdk.is_initialized():
             sentry_sdk.flush()
 
+    @staticmethod
+    async def flush_async() -> None:
+        """
+        Asynchronously flushes all pending Sentry events.
+
+        This method prevents blocking the event loop during shutdown by
+        running the synchronous flush operation in an executor.
+        """
+        if sentry_sdk.is_initialized():
+            loop: asyncio.AbstractEventLoop = asyncio.get_running_loop()
+            await loop.run_in_executor(None, SentryManager.flush)
+
     @property
     def is_initialized(self) -> bool:
         """
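`flush_async` is a drop-in replacement for `flush` in coroutine contexts. A minimal usage sketch (the `shutdown_example` wrapper is hypothetical; patch 33 below wires the real call into `TuxApp.shutdown`):

```python
import asyncio

from tux.utils.sentry_manager import SentryManager


async def shutdown_example() -> None:
    # ... close connections, cancel background tasks ...

    # Runs sentry_sdk.flush() in the default thread-pool executor, so the
    # event loop keeps servicing other shutdown work while events drain.
    await SentryManager.flush_async()


asyncio.run(shutdown_example())
```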
From 252a56b26007caddd2b743a479e95579a4b5e4e0 Mon Sep 17 00:00:00 2001
From: kzndotsh
Date: Mon, 21 Jul 2025 01:02:39 -0400
Subject: [PATCH 32/34] feat(utils): add @instrumented_task decorator for task instrumentation

- Introduce @instrumented_task decorator to instrument critical task coroutines for monitoring and metrics
- Update TaskManager documentation to reflect new usage pattern for wrapping critical tasks
- Enhance validation of Sentry instrumentation setup for better reliability

---
 tux/utils/task_manager.py | 31 ++++++++++++++++++++++++++++---
 1 file changed, 28 insertions(+), 3 deletions(-)

diff --git a/tux/utils/task_manager.py b/tux/utils/task_manager.py
index 420964482..552061e13 100644
--- a/tux/utils/task_manager.py
+++ b/tux/utils/task_manager.py
@@ -19,6 +19,7 @@
 
 import asyncio
 import contextlib
+import functools
 import time
 from collections import defaultdict, deque
 from collections.abc import Callable, Coroutine
@@ -33,6 +34,21 @@
 from tux.utils.tracing import start_span, transaction
 
 
+def instrumented_task(coro: Callable[..., Coroutine[Any, Any, Any]]) -> Callable[..., Coroutine[Any, Any, Any]]:
+    """
+    Decorator to instrument a task coroutine for monitoring/metrics.
+    Apply this decorator to critical task coroutines at definition time.
+    """
+
+    @functools.wraps(coro)
+    async def wrapper(*args: Any, **kwargs: Any) -> Any:
+        # Insert instrumentation logic here (e.g., Sentry, metrics, logging)
+        # Start timing, add tracing, etc.
+        return await coro(*args, **kwargs)
+
+    return wrapper
+
+
 class TaskCategory(Enum):
     """Categories for background tasks."""
 
@@ -145,10 +161,19 @@ def setup_task_instrumentation(self) -> None:
         """
         Initializes instrumentation for all registered critical tasks.
 
-        This method should be called after all cogs are loaded to ensure
-        that the task objects are available to be wrapped.
+        To ensure compatibility with discord.py and avoid relying on internal
+        implementation details, critical task coroutines should be wrapped with
+        the @instrumented_task decorator at definition time. This ensures that
+        instrumentation is applied in a supported and robust manner.
+
+        Example usage:
+            @instrumented_task
+            async def my_critical_task(...):
+                ...
+
+        This method can still be used for any additional setup or validation.
         """
-        logger.info("Setting up Sentry instrumentation for critical tasks...")
+        logger.info("Validating Sentry instrumentation for critical tasks...")
 
         for task_name, config in self.critical_tasks.items():
             if not (cog := self.bot.cogs.get(config.cog_name)):
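As committed, the decorator is a pass-through hook with placeholder comments; the actual instrumentation is left to be filled in. A sketch of what a Sentry-backed body might look like, assuming `start_span(op, name)` from `tux/utils/tracing.py` is a context manager yielding a span object that supports `set_data` (the helper's real signature lives in that module and may differ):

```python
import functools
import time
from collections.abc import Callable, Coroutine
from typing import Any

# Assumed interface: start_span(op, name) yields a span supporting set_data().
from tux.utils.tracing import start_span


def instrumented_task(coro: Callable[..., Coroutine[Any, Any, Any]]) -> Callable[..., Coroutine[Any, Any, Any]]:
    """Wrap a task coroutine so each run is traced and timed."""

    @functools.wraps(coro)
    async def wrapper(*args: Any, **kwargs: Any) -> Any:
        started = time.perf_counter()
        # One span per execution, named after the wrapped coroutine.
        with start_span("task.run", coro.__qualname__) as span:
            try:
                return await coro(*args, **kwargs)
            finally:
                span.set_data("task.duration_seconds", time.perf_counter() - started)

    return wrapper
```

Because `functools.wraps` preserves the coroutine's name and docstring, the wrapped task remains discoverable by `TaskManager` validation without relying on discord.py internals.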
""" - logger.info("Setting up Sentry instrumentation for critical tasks...") + logger.info("Validating Sentry instrumentation for critical tasks...") for task_name, config in self.critical_tasks.items(): if not (cog := self.bot.cogs.get(config.cog_name)): From 7b06cc9650cbf6330c9ad7c320512d6d1d32598a Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Mon, 21 Jul 2025 01:02:54 -0400 Subject: [PATCH 33/34] feat(app): enhance signal handling for graceful shutdown with event loop support - Update setup_signals method to accept an event loop for better cross-platform compatibility - Implement signal handlers for SIGINT and SIGTERM using loop.add_signal_handler - Add fallback for platforms that do not support add_signal_handler, with a warning for Windows limitations - Change SentryManager.flush to SentryManager.flush_async for non-blocking shutdown --- tux/app.py | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/tux/app.py b/tux/app.py index 3bd954bc2..4e8e0244c 100644 --- a/tux/app.py +++ b/tux/app.py @@ -14,6 +14,7 @@ import asyncio import signal +import sys import discord from loguru import logger @@ -75,8 +76,9 @@ async def start(self) -> None: # Initialize Sentry SentryManager.setup() - # Set up signal handlers - self.setup_signals() + # Set up signal handlers using the event loop for cross-platform compatibility + loop = asyncio.get_event_loop() + self.setup_signals(loop) # Validate config if not self.validate_config(): @@ -132,23 +134,38 @@ async def shutdown(self) -> None: if self.bot and not self.bot.is_closed(): await self.bot.shutdown() - SentryManager.flush() + await SentryManager.flush_async() await asyncio.sleep(0.1) # Brief pause to allow buffers to flush logger.info("Shutdown complete") # --- Environment Setup --- - def setup_signals(self) -> None: + def setup_signals(self, loop: asyncio.AbstractEventLoop) -> None: """ - Sets up OS-level signal handlers for graceful shutdown. + Sets up OS-level signal handlers for graceful shutdown using the event loop for better cross-platform compatibility. - This ensures that when the bot process receives a SIGINT (Ctrl+C) or - SIGTERM (from systemd or Docker), it is intercepted and handled - cleanly instead of causing an abrupt exit. + Note: loop.add_signal_handler may not be available on all platforms (e.g., Windows for some signals). """ - signal.signal(signal.SIGTERM, SentryManager.report_signal) - signal.signal(signal.SIGINT, SentryManager.report_signal) + + def handle_sigterm() -> None: + SentryManager.report_signal(signal.SIGTERM, None) + + def handle_sigint() -> None: + SentryManager.report_signal(signal.SIGINT, None) + + try: + loop.add_signal_handler(signal.SIGTERM, handle_sigterm) + loop.add_signal_handler(signal.SIGINT, handle_sigint) + except NotImplementedError: + # Fallback for platforms that do not support add_signal_handler (e.g., Windows) + signal.signal(signal.SIGINT, SentryManager.report_signal) + signal.signal(signal.SIGTERM, SentryManager.report_signal) + if sys.platform.startswith("win"): + # Document limitation + logger.warning( + "Warning: Signal handling is limited on Windows. Some signals may not be handled as expected.", + ) def validate_config(self) -> bool: """ From c38c843d36381006ece0c5d228a5817651d4eece Mon Sep 17 00:00:00 2001 From: kzndotsh Date: Fri, 25 Jul 2025 22:41:36 -0400 Subject: [PATCH 34/34] chore(cli): change file permissions to executable for CLI scripts Update file permissions for several CLI-related Python scripts to executable mode (100755). 
From c38c843d36381006ece0c5d228a5817651d4eece Mon Sep 17 00:00:00 2001
From: kzndotsh
Date: Fri, 25 Jul 2025 22:41:36 -0400
Subject: [PATCH 34/34] chore(cli): change file permissions to executable for CLI scripts

Update file permissions for several CLI-related Python scripts to
executable mode (100755). This allows the scripts to be invoked directly
from the command line, improving usability for developers and users of
the CLI tools.

---
 tux/cli/__init__.py | 0
 tux/cli/core.py     | 0
 tux/cli/database.py | 0
 tux/cli/dev.py      | 0
 tux/cli/docker.py   | 0
 tux/cli/docs.py     | 0
 tux/cli/test.py     | 0
 tux/cli/ui.py       | 0
 8 files changed, 0 insertions(+), 0 deletions(-)
 mode change 100644 => 100755 tux/cli/__init__.py
 mode change 100644 => 100755 tux/cli/core.py
 mode change 100644 => 100755 tux/cli/database.py
 mode change 100644 => 100755 tux/cli/dev.py
 mode change 100644 => 100755 tux/cli/docker.py
 mode change 100644 => 100755 tux/cli/docs.py
 mode change 100644 => 100755 tux/cli/test.py
 mode change 100644 => 100755 tux/cli/ui.py

diff --git a/tux/cli/__init__.py b/tux/cli/__init__.py
old mode 100644
new mode 100755
diff --git a/tux/cli/core.py b/tux/cli/core.py
old mode 100644
new mode 100755
diff --git a/tux/cli/database.py b/tux/cli/database.py
old mode 100644
new mode 100755
diff --git a/tux/cli/dev.py b/tux/cli/dev.py
old mode 100644
new mode 100755
diff --git a/tux/cli/docker.py b/tux/cli/docker.py
old mode 100644
new mode 100755
diff --git a/tux/cli/docs.py b/tux/cli/docs.py
old mode 100644
new mode 100755
diff --git a/tux/cli/test.py b/tux/cli/test.py
old mode 100644
new mode 100755
diff --git a/tux/cli/ui.py b/tux/cli/ui.py
old mode 100644
new mode 100755