Initial commit — ComfyUI Discord bot + web UI
Full source for the-third-rev: Discord bot (discord.py), FastAPI web UI (React/TS/Vite/Tailwind), ComfyUI integration, generation history DB, preset manager, workflow inspector, and all supporting modules. Excluded from tracking: .env, invite_tokens.json, *.db (SQLite), current-workflow-changes.json, user_settings/, presets/, logs/, web-static/ (build output), frontend/node_modules/. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
64
commands/__init__.py
Normal file
64
commands/__init__.py
Normal file
@@ -0,0 +1,64 @@
|
||||
"""
|
||||
commands package
|
||||
================
|
||||
|
||||
Discord bot commands for the ComfyUI bot.
|
||||
|
||||
This package contains all command handlers organized by functionality:
|
||||
- generation: Image/video generation commands (generate, workflow-gen, rerun, cancel)
|
||||
- workflow: Workflow management commands
|
||||
- upload: Image upload commands
|
||||
- history: History viewing and retrieval commands
|
||||
- workflow_changes: Runtime workflow parameter management (prompt, seed, etc.)
|
||||
- utility: Quality-of-life commands (ping, status, comfy-stats, comfy-queue, uptime)
|
||||
- presets: Named workflow preset management
|
||||
"""
|
||||
from __future__ import annotations
|
||||
from config import BotConfig
|
||||
|
||||
from .generation import setup_generation_commands
|
||||
from .input_images import setup_input_image_commands
|
||||
from .server import setup_server_commands
|
||||
from .workflow import setup_workflow_commands
|
||||
from .history import setup_history_commands
|
||||
from .workflow_changes import setup_workflow_changes_commands
|
||||
from .utility import setup_utility_commands
|
||||
from .presets import setup_preset_commands
|
||||
from .help_command import CustomHelpCommand
|
||||
|
||||
|
||||
def register_all_commands(bot, config: BotConfig):
    """
    Register every command handler on the bot.

    Call exactly once during bot initialization; each ``setup_*`` helper
    attaches its own group of commands to *bot*.

    Parameters
    ----------
    bot : commands.Bot
        The Discord bot instance.
    config : BotConfig
        The bot configuration object containing environment settings.
    """
    # Handlers that need access to environment configuration.
    for setup_with_config in (
        setup_generation_commands,
        setup_input_image_commands,
        setup_server_commands,
    ):
        setup_with_config(bot, config)

    # Handlers that only need the bot itself.
    for setup in (
        setup_workflow_commands,
        setup_history_commands,
        setup_workflow_changes_commands,
        setup_utility_commands,
        setup_preset_commands,
    ):
        setup(bot)
||||
|
||||
|
||||
__all__ = [
|
||||
"register_all_commands",
|
||||
"setup_generation_commands",
|
||||
"setup_input_image_commands",
|
||||
"setup_workflow_commands",
|
||||
"setup_history_commands",
|
||||
"setup_workflow_changes_commands",
|
||||
"setup_utility_commands",
|
||||
"setup_preset_commands",
|
||||
"CustomHelpCommand",
|
||||
]
|
||||
389
commands/generation.py
Normal file
389
commands/generation.py
Normal file
@@ -0,0 +1,389 @@
|
||||
"""
|
||||
commands/generation.py
|
||||
======================
|
||||
|
||||
Image and video generation commands for the Discord ComfyUI bot.
|
||||
|
||||
Jobs are submitted directly to ComfyUI (no internal SerialJobQueue).
|
||||
ComfyUI's own queue handles ordering. Each Discord command waits for its
|
||||
prompt_id to complete via WebSocket and then replies with the result.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
try:
|
||||
import aiohttp # type: ignore
|
||||
except Exception: # pragma: no cover
|
||||
aiohttp = None # type: ignore
|
||||
|
||||
from io import BytesIO
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import discord
|
||||
from discord.ext import commands
|
||||
|
||||
from config import ARG_PROMPT_KEY, ARG_NEG_PROMPT_KEY, ARG_QUEUE_KEY, MAX_IMAGES_PER_RESPONSE
|
||||
from discord_utils import require_comfy_client, convert_image_bytes_to_discord_files
|
||||
from media_uploader import flush_pending
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def _safe_reply(
    ctx: commands.Context,
    *,
    content: str | None = None,
    files: list[discord.File] | None = None,
    mention_author: bool = True,
    delete_after: float | None = None,
    tries: int = 4,
    base_delay: float = 1.0,
):
    """Reply to Discord with retries for transient network/Discord errors."""

    def _is_transient(exc: Exception) -> bool:
        # Timeouts and Windows socket-level resets are worth retrying.
        if isinstance(exc, asyncio.TimeoutError):
            return True
        if isinstance(exc, OSError) and getattr(exc, "winerror", None) in {
            64, 121, 1231, 10053, 10054,
        }:
            return True
        if aiohttp is not None:
            try:
                if isinstance(exc, (aiohttp.ClientOSError, aiohttp.ClientConnectionError)):
                    return True
            except Exception:
                pass
        if isinstance(exc, discord.HTTPException):
            status = getattr(exc, "status", None)
            # Unknown status, server-side failure, or rate limiting.
            if status is None or status >= 500 or status == 429:
                return True
        return False

    wait = base_delay
    last_exc: Exception | None = None

    for attempt in range(1, tries + 1):
        try:
            return await ctx.reply(
                content=content,
                files=files or [],
                mention_author=mention_author,
                delete_after=delete_after,
            )
        except Exception as exc:  # noqa: BLE001
            last_exc = exc
            # Give up immediately on non-transient errors or the last try.
            if not _is_transient(exc) or attempt == tries:
                raise
            logger.warning(
                "Transient error sending Discord message (attempt %d/%d): %s: %s",
                attempt, tries, type(exc).__name__, exc,
            )
            await asyncio.sleep(wait)
            wait *= 2

    raise last_exc  # type: ignore[misc]
|
||||
|
||||
|
||||
def _seed_line(bot) -> str:
|
||||
"""Return a formatted seed line if a seed was tracked, else empty string."""
|
||||
seed = getattr(bot.comfy, "last_seed", None)
|
||||
return f"\nSeed: `{seed}`" if seed is not None else ""
|
||||
|
||||
|
||||
async def _run_generate(ctx: commands.Context, bot, prompt_text: str, negative_text: Optional[str]):
    """Execute a prompt-based generation and reply with results."""
    images, prompt_id = await bot.comfy.generate_image(
        prompt_text, negative_text,
        source="discord", user_label=ctx.author.display_name,
    )
    if not images:
        await ctx.reply(
            "No images were generated. Please try again with a different prompt.",
            mention_author=False,
        )
        return

    attachments = convert_image_bytes_to_discord_files(
        images, max_files=MAX_IMAGES_PER_RESPONSE, prefix="generated"
    )
    summary = f"Generated {len(images)} image(s). Prompt ID: `{prompt_id}`{_seed_line(bot)}"
    await _safe_reply(ctx, content=summary, files=attachments, mention_author=True)

    # Fire-and-forget: push any pending output files to the media host.
    asyncio.create_task(flush_pending(
        Path(bot.config.comfy_output_path),
        bot.config.media_upload_user,
        bot.config.media_upload_pass,
    ))
|
||||
|
||||
|
||||
async def _run_workflow(ctx: commands.Context, bot, config):
    """Execute a workflow-based generation and reply with results."""
    logger.info("Executing workflow generation")
    await ctx.reply("Executing workflow…", mention_author=False, delete_after=5.0)
    images, videos, prompt_id = await bot.comfy.generate_image_with_workflow(
        source="discord", user_label=ctx.author.display_name,
    )

    if not images and not videos:
        await ctx.reply(
            "No images or videos were generated. Check the workflow and ComfyUI logs.",
            mention_author=False,
        )
        return

    seed_info = _seed_line(bot)

    if videos:
        output_root = Path(config.comfy_output_path)
        video_file = None
        # Attach the first video that can actually be read from disk.
        for info in videos:
            name = info.get("video_name")
            if not name:
                continue
            subfolder = info.get("video_subfolder", "")
            candidate = output_root / subfolder / name if subfolder else output_root / name
            try:
                video_file = discord.File(BytesIO(candidate.read_bytes()), filename=name)
                break
            except Exception as exc:
                logger.exception("Failed to read video %s: %s", candidate, exc)

        if video_file:
            await _safe_reply(
                ctx,
                content=(
                    f"Generated {len(images)} image(s) and a video. "
                    f"Prompt ID: `{prompt_id}`{seed_info}"
                ),
                files=[video_file],
                mention_author=True,
            )
        else:
            await ctx.reply(
                f"Generated output but failed to read video file. "
                f"Prompt ID: `{prompt_id}`{seed_info}",
                mention_author=True,
            )
    else:
        attachments = convert_image_bytes_to_discord_files(
            images, max_files=MAX_IMAGES_PER_RESPONSE, prefix="generated"
        )
        await _safe_reply(
            ctx,
            content=(
                f"Generated {len(images)} image(s) using workflow. "
                f"Prompt ID: `{prompt_id}`{seed_info}"
            ),
            files=attachments,
            mention_author=True,
        )

    # Fire-and-forget: push any pending output files to the media host.
    asyncio.create_task(flush_pending(
        Path(config.comfy_output_path),
        config.media_upload_user,
        config.media_upload_pass,
    ))
|
||||
|
||||
|
||||
def setup_generation_commands(bot, config):
    """Register generation commands with the bot."""

    async def _announce(ctx: commands.Context, text: str) -> None:
        # Shared style for the transient "queued" acknowledgements.
        await ctx.reply(text, mention_author=False, delete_after=30.0)

    @bot.command(name="test", extras={"category": "Generation"})
    async def test_command(ctx: commands.Context) -> None:
        """A simple test command to verify the bot is working."""
        await ctx.reply(
            "The bot is working! Use `ttr!generate` to create images.",
            mention_author=False,
        )

    @bot.command(name="generate", aliases=["gen"], extras={"category": "Generation"})
    @require_comfy_client
    async def generate(ctx: commands.Context, *, args: str = "") -> None:
        """
        Generate images using ComfyUI.

        Usage::

            ttr!generate prompt:<your prompt> negative_prompt:<your negatives>

        The ``prompt:`` keyword is required. ``negative_prompt:`` is optional.
        """
        prompt_text: Optional[str] = None
        negative_text: Optional[str] = None

        if args:
            if ARG_PROMPT_KEY in args:
                after_key = args.split(ARG_PROMPT_KEY, 1)[1]
                if ARG_NEG_PROMPT_KEY in after_key:
                    positive_part, negative_part = after_key.split(ARG_NEG_PROMPT_KEY, 1)
                    prompt_text = positive_part.strip()
                    negative_text = negative_part.strip() or None
                else:
                    prompt_text = after_key.strip()
            else:
                # No keyword at all: treat the whole argument as the prompt.
                prompt_text = args.strip()

        if not prompt_text:
            await ctx.reply(
                f"Please specify a prompt: `{ARG_PROMPT_KEY}<your prompt>`.",
                mention_author=False,
            )
            return

        bot.last_gen = {"mode": "prompt", "prompt": prompt_text, "negative": negative_text}

        try:
            # Show queue position from ComfyUI before waiting
            position = await bot.comfy.get_queue_depth() + 1
            await _announce(ctx, f"Queued ✅ (ComfyUI position: ~{position})")
            await _run_generate(ctx, bot, prompt_text, negative_text)
        except Exception as exc:
            logger.exception("Error generating image")
            await ctx.reply(
                f"An error occurred: {type(exc).__name__}: {exc}",
                mention_author=False,
            )

    @bot.command(
        name="workflow-gen",
        aliases=["workflow-generate", "wfg"],
        extras={"category": "Generation"},
    )
    @require_comfy_client
    async def generate_workflow_command(ctx: commands.Context, *, args: str = "") -> None:
        """
        Generate using the currently loaded workflow template.

        Usage::

            ttr!workflow-gen
            ttr!workflow-gen queue:<number>
        """
        bot.last_gen = {"mode": "workflow", "prompt": None, "negative": None}

        # Handle batch queue parameter
        if ARG_QUEUE_KEY in args:
            count_text = args.split(ARG_QUEUE_KEY, 1)[1].strip()
            if not count_text.isdigit():
                await _announce(ctx, f"Invalid queue parameter. Use `{ARG_QUEUE_KEY}<number>`.")
                return
            run_count = int(count_text)
            if run_count <= 1:
                await _announce(
                    ctx,
                    "Please provide a number greater than 1 for queueing multiple runs.",
                )
                return

            await ctx.reply(
                f"Queuing {run_count} workflow runs…",
                mention_author=False,
            )
            # Errors on an individual run are reported but do not abort the batch.
            for run_index in range(run_count):
                try:
                    position = await bot.comfy.get_queue_depth() + 1
                    await _announce(
                        ctx,
                        f"Queued run {run_index+1}/{run_count} ✅ (ComfyUI position: ~{position})",
                    )
                    await _run_workflow(ctx, bot, config)
                except Exception as exc:
                    logger.exception("Error on workflow run %d", run_index + 1)
                    await ctx.reply(
                        f"Error on run {run_index+1}: {type(exc).__name__}: {exc}",
                        mention_author=False,
                    )
            return

        try:
            position = await bot.comfy.get_queue_depth() + 1
            await _announce(ctx, f"Queued ✅ (ComfyUI position: ~{position})")
            await _run_workflow(ctx, bot, config)
        except Exception as exc:
            logger.exception("Error generating with workflow")
            await ctx.reply(
                f"An error occurred: {type(exc).__name__}: {exc}",
                mention_author=False,
            )

    @bot.command(name="rerun", aliases=["rr"], extras={"category": "Generation"})
    @require_comfy_client
    async def rerun_command(ctx: commands.Context) -> None:
        """
        Re-run the last generation with the same parameters.

        Re-submits the most recent ``ttr!generate`` or ``ttr!workflow-gen``
        with the same mode and prompt. Current state overrides (seed,
        input_image, etc.) are applied at execution time.
        """
        last = getattr(bot, "last_gen", None)
        if last is None:
            await ctx.reply(
                "No previous generation to rerun.",
                mention_author=False,
            )
            return

        try:
            position = await bot.comfy.get_queue_depth() + 1
            await _announce(ctx, f"Rerun queued ✅ (ComfyUI position: ~{position})")
            if last["mode"] == "prompt":
                await _run_generate(ctx, bot, last["prompt"], last["negative"])
            else:
                await _run_workflow(ctx, bot, config)
        except Exception as exc:
            logger.exception("Error queueing rerun")
            await ctx.reply(
                f"An error occurred: {type(exc).__name__}: {exc}",
                mention_author=False,
            )

    @bot.command(name="cancel", extras={"category": "Generation"})
    @require_comfy_client
    async def cancel_command(ctx: commands.Context) -> None:
        """
        Clear all pending jobs from the ComfyUI queue.

        Usage::

            ttr!cancel
        """
        try:
            if await bot.comfy.clear_queue():
                await ctx.reply("ComfyUI queue cleared.", mention_author=False)
            else:
                await ctx.reply(
                    "Failed to clear the ComfyUI queue (server may have returned an error).",
                    mention_author=False,
                )
        except Exception as exc:
            await ctx.reply(f"Error: {exc}", mention_author=False)
|
||||
134
commands/help_command.py
Normal file
134
commands/help_command.py
Normal file
@@ -0,0 +1,134 @@
|
||||
"""
|
||||
commands/help_command.py
|
||||
========================
|
||||
|
||||
Custom help command for the Discord ComfyUI bot.
|
||||
|
||||
Replaces discord.py's default help with a categorised listing that
|
||||
automatically includes every registered command.
|
||||
|
||||
How it works
|
||||
------------
|
||||
Each ``@bot.command()`` decorator should carry an ``extras`` dict with a
|
||||
``"category"`` key:
|
||||
|
||||
@bot.command(name="my-command", extras={"category": "Generation"})
|
||||
async def my_command(ctx):
|
||||
\"""One-line brief shown in the listing.
|
||||
|
||||
Longer description shown in ttr!help my-command.
|
||||
\"""
|
||||
|
||||
The first line of the docstring becomes the brief shown in the main
|
||||
listing. The full docstring is shown when the user asks for per-command
|
||||
detail. Commands without a category appear under **Other**.
|
||||
|
||||
Usage
|
||||
-----
|
||||
ttr!help — list all commands grouped by category
|
||||
ttr!help <command> — detailed help for a specific command
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections import defaultdict
|
||||
from typing import List, Mapping, Optional
|
||||
|
||||
from discord.ext import commands
|
||||
|
||||
|
||||
# Order in which categories appear in the full help listing.
|
||||
# Any category not listed here appears at the end, sorted alphabetically.
|
||||
CATEGORY_ORDER = ["Generation", "Workflow", "Upload", "History", "Presets", "Utility"]
|
||||
|
||||
|
||||
def _category_sort_key(name: str) -> tuple:
|
||||
"""Return a sort key that respects CATEGORY_ORDER, then alphabetical."""
|
||||
try:
|
||||
return (CATEGORY_ORDER.index(name), name)
|
||||
except ValueError:
|
||||
return (len(CATEGORY_ORDER), name)
|
||||
|
||||
|
||||
class CustomHelpCommand(commands.HelpCommand):
    """
    Categorised help command.

    Groups commands by the ``"category"`` value in their ``extras`` dict.
    Commands that omit this appear under **Other**.

    Adding a new command to the help output requires no changes here —
    just set ``extras={"category": "..."}`` on the decorator and write a
    descriptive docstring.
    """

    # ------------------------------------------------------------------
    # Main listing — ttr!help
    # ------------------------------------------------------------------

    async def send_bot_help(
        self,
        mapping: Mapping[Optional[commands.Cog], List[commands.Command]],
    ) -> None:
        """Send the full command listing grouped by category."""
        # Collect all visible commands across every cog / None bucket
        visible: List[commands.Command] = []
        for bucket in mapping.values():
            visible.extend(await self.filter_commands(bucket))

        # Group by category
        by_category: dict[str, list[commands.Command]] = defaultdict(list)
        for command in visible:
            by_category[command.extras.get("category", "Other")].append(command)

        prefix = self.context.prefix
        lines: list[str] = [f"**Commands** — prefix: `{prefix}`\n"]

        for category in sorted(by_category, key=_category_sort_key):
            lines.append(f"**{category}**")
            for command in sorted(by_category[category], key=lambda c: c.name):
                alias_suffix = (
                    f" ({', '.join(command.aliases)})" if command.aliases else ""
                )
                summary = command.short_doc or "No description."
                lines.append(f"  `{command.name}`{alias_suffix} — {summary}")
            lines.append("")

        lines.append(
            f"Use `{prefix}help <command>` for details on a specific command."
        )

        await self.get_destination().send("\n".join(lines))

    # ------------------------------------------------------------------
    # Per-command detail — ttr!help <command>
    # ------------------------------------------------------------------

    async def send_command_help(self, command: commands.Command) -> None:
        """Send detailed help for a single command."""
        prefix = self.context.prefix
        header = f"`{prefix}{command.name}`"
        if command.aliases:
            alias_list = ", ".join(f"`{a}`" for a in command.aliases)
            header += f" (aliases: {alias_list})"

        category = command.extras.get("category", "Other")
        body = command.help.strip() if command.help else "No description available."
        lines: list[str] = [header, f"Category: **{category}**", "", body]

        await self.get_destination().send("\n".join(lines))

    # ------------------------------------------------------------------
    # Error — unknown command name
    # ------------------------------------------------------------------

    async def send_error_message(self, error: str) -> None:
        """Forward the error text to the channel."""
        await self.get_destination().send(error)
|
||||
169
commands/history.py
Normal file
169
commands/history.py
Normal file
@@ -0,0 +1,169 @@
|
||||
"""
|
||||
commands/history.py
|
||||
===================
|
||||
|
||||
History management commands for the Discord ComfyUI bot.
|
||||
|
||||
This module contains commands for viewing and retrieving past generation
|
||||
results from the bot's history.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from io import BytesIO
|
||||
from typing import Optional
|
||||
|
||||
import discord
|
||||
from discord.ext import commands
|
||||
|
||||
from config import MAX_IMAGES_PER_RESPONSE
|
||||
from discord_utils import require_comfy_client, truncate_text, convert_image_bytes_to_discord_files
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def setup_history_commands(bot):
    """
    Register history management commands with the bot.

    Parameters
    ----------
    bot : commands.Bot
        The Discord bot instance.
    """

    @bot.command(name="history", extras={"category": "History"})
    @require_comfy_client
    async def history_command(ctx: commands.Context) -> None:
        """
        Show a list of recently generated prompts.

        The bot keeps a rolling history of the last few generations. Each
        entry lists the prompt id along with the positive and negative
        prompt texts. You can retrieve the images from a previous
        generation with the ``ttr!gethistory <prompt_id>`` command.
        """
        hist = bot.comfy.get_history()
        if not hist:
            await ctx.reply(
                "No history available yet. Generate something first!",
                mention_author=False,
            )
            return

        # Build a human readable list
        lines = ["Here are the most recent generations (oldest first):"]
        for entry in hist:
            pid = entry.get("prompt_id", "unknown")
            prompt = entry.get("prompt") or ""
            neg = entry.get("negative_prompt") or ""
            # Truncate long prompts for readability
            lines.append(
                f"• ID: {pid} | prompt: '{truncate_text(prompt, 60)}' | negative: '{truncate_text(neg, 60)}'"
            )
        await ctx.reply("\n".join(lines), mention_author=False)

    @bot.command(name="get-history", aliases=["gethistory", "gh"], extras={"category": "History"})
    @require_comfy_client
    async def get_history_command(ctx: commands.Context, *, arg: str = "") -> None:
        """
        Retrieve images from a previous generation, or search history by keyword.

        Usage:
        ttr!gethistory <prompt_id_or_index>
        ttr!gethistory search:<keyword>

        Provide either the prompt id returned in the generation response
        (shown in `ttr!history`) or the 1‑based index into the history
        list. The bot will fetch the images associated with that
        generation and resend them. If no images are found, you will be
        notified.

        Use ``search:<keyword>`` to filter history by prompt text, checkpoint
        name, seed value, or any other override field.
        """
        if not arg:
            await ctx.reply(
                "Please provide a prompt id, history index, or `search:<keyword>`. See `ttr!history` for a list.",
                mention_author=False,
            )
            return

        # Handle search:<keyword>
        if arg.lower().startswith("search:"):
            keyword = arg[len("search:"):].strip()
            if not keyword:
                await ctx.reply("Please provide a keyword after `search:`.", mention_author=False)
                return
            # Discord has no per-user context like the web UI, so search the
            # shared DB history. (Removed the previously unused
            # ``search_history_for_user`` import.)
            from generation_db import get_history as db_get_history
            hist = db_get_history(limit=50)
            # Lowercase once; the match scans the stringified overrides so it
            # covers prompt text, checkpoint names, seeds, etc.
            needle = keyword.lower()
            matches = [
                e for e in hist
                if needle in str(e.get("overrides", {})).lower()
            ]
            if not matches:
                await ctx.reply(f"No history entries matching `{keyword}`.", mention_author=False)
                return
            lines = [f"**History matching `{keyword}`** ({len(matches)} result(s))"]
            for entry in matches[:10]:
                pid = entry.get("prompt_id", "unknown")
                overrides = entry.get("overrides") or {}
                prompt = str(overrides.get("prompt") or "")
                lines.append(
                    f"• `{pid[:12]}…` | {truncate_text(prompt, 60) if prompt else '(no prompt)'}"
                )
            if len(matches) > 10:
                lines.append(f"_(showing first 10 of {len(matches)})_")
            await ctx.reply("\n".join(lines), mention_author=False)
            return

        # Determine whether arg refers to an index or an id
        target_id: Optional[str] = None
        hist = bot.comfy.get_history()

        # If arg is a digit, interpret as 1‑based index
        if arg.isdigit():
            idx = int(arg) - 1
            if idx < 0 or idx >= len(hist):
                await ctx.reply(
                    f"Index out of range. There are {len(hist)} entries in history.",
                    mention_author=False,
                )
                return
            # Robustness fix: guard against malformed entries instead of a
            # bare ``hist[idx]["prompt_id"]`` that would raise KeyError.
            target_id = hist[idx].get("prompt_id")
            if not target_id:
                await ctx.reply(
                    "That history entry has no prompt id recorded.",
                    mention_author=False,
                )
                return
        else:
            # Otherwise treat as an explicit prompt id
            target_id = arg.strip()

        try:
            images = await bot.comfy.fetch_history_images(target_id)
            if not images:
                await ctx.reply(
                    f"No images found for prompt id `{target_id}`.",
                    mention_author=False,
                )
                return

            files = []
            for idx, img_bytes in enumerate(images):
                if idx >= MAX_IMAGES_PER_RESPONSE:
                    break
                file_obj = BytesIO(img_bytes)
                file_obj.seek(0)
                files.append(discord.File(file_obj, filename=f"history_{target_id}_{idx+1}.png"))

            await ctx.reply(
                content=f"Here are the images for prompt id `{target_id}`:",
                files=files,
                mention_author=False,
            )
        except Exception as exc:
            logger.exception("Failed to fetch history for %s", target_id)
            await ctx.reply(
                f"An error occurred: {type(exc).__name__}: {exc}",
                mention_author=False,
            )
|
||||
178
commands/input_images.py
Normal file
178
commands/input_images.py
Normal file
@@ -0,0 +1,178 @@
|
||||
"""
|
||||
commands/input_images.py
|
||||
========================
|
||||
|
||||
Channel-backed input image management.
|
||||
|
||||
Images uploaded to the designated `comfy-input` channel get a persistent
|
||||
"✅ Set as input" button posted by the bot — one reply per attachment so
|
||||
every image in a multi-image message is independently selectable.
|
||||
|
||||
Persistent views survive bot restarts: on_ready re-registers every view
|
||||
stored in the SQLite database.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
import discord
|
||||
from discord.ext import commands
|
||||
|
||||
from image_utils import compress_to_discord_limit
|
||||
from input_image_db import (
|
||||
activate_image_for_slot,
|
||||
get_all_images,
|
||||
upsert_image,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".webp", ".gif", ".bmp"}
|
||||
|
||||
|
||||
|
||||
class PersistentSetInputView(discord.ui.View):
    """
    A persistent view that survives bot restarts.

    One instance is created per DB row (i.e. per attachment).
    The button's custom_id encodes the row id so the callback can look
    up the exact filename to download.
    """

    def __init__(self, bot, config, row_id: int):
        # timeout=None is required for a persistent view that can be
        # re-registered after a restart via bot.add_view().
        super().__init__(timeout=None)
        self._bot = bot
        self._config = config
        self._row_id = row_id

        btn = discord.ui.Button(
            label="✅ Set as input",
            style=discord.ButtonStyle.success,
            custom_id=f"set_input:{row_id}",
        )
        btn.callback = self._set_callback
        self.add_item(btn)

    async def _set_callback(self, interaction: discord.Interaction) -> None:
        """Activate this row's image as the workflow's input_image override."""
        await interaction.response.defer(ephemeral=True)
        try:
            filename = activate_image_for_slot(
                self._row_id, "input_image", self._config.comfy_input_path
            )
            self._bot.comfy.state_manager.set_override("input_image", filename)
            # Bug fix: the confirmation previously hard-coded "(unknown)"
            # (a placeholder-less f-string) while the activated filename
            # went unused — interpolate the real filename instead.
            await interaction.followup.send(
                f"✅ Input image set to `{filename}`", ephemeral=True
            )
        except Exception as exc:
            logger.exception("set_input button failed for row %s", self._row_id)
            await interaction.followup.send(f"❌ Error: {exc}", ephemeral=True)
|
||||
|
||||
|
||||
async def _register_attachment(bot, config, message: discord.Message, attachment: discord.Attachment) -> None:
    """Post a reply with the image preview, a Set-as-input button, and record it in the DB."""
    logger.info("[_register_attachment] Start")
    raw_bytes = await attachment.read()
    source_name = attachment.filename
    logger.info("[_register_attachment] Reading attachment")

    # Compress only for the Discord re-send (8 MiB bot limit)
    payload, payload_name = compress_to_discord_limit(raw_bytes, source_name)

    preview = discord.File(io.BytesIO(payload), filename=payload_name)
    reply = await message.channel.send(f"`{source_name}`", file=preview)

    # Store original quality bytes in DB
    row_id = upsert_image(message.id, reply.id, message.channel.id, source_name, image_data=raw_bytes)
    view = PersistentSetInputView(bot, config, row_id)
    bot.add_view(view, message_id=reply.id)
    logger.info("[_register_attachment] Done")
    await reply.edit(view=view)
|
||||
|
||||
|
||||
def setup_input_image_commands(bot, config=None):
    """Register input image commands and the on_message listener."""

    @bot.listen("on_message")
    async def _on_input_channel_message(message: discord.Message) -> None:
        """Watch the comfy-input channel and attach a Set-as-input button to every image upload."""
        if config is None:
            logger.warning("[_on_input_channel_message] Config is none")
            return
        # Ignore other channels and the bot's own (re-)posts.
        if message.channel.id != config.comfy_input_channel_id or message.author.bot:
            return

        uploads = [
            a for a in message.attachments
            if Path(a.filename).suffix.lower() in IMAGE_EXTENSIONS
        ]
        if not uploads:
            logger.info("[_on_input_channel_message] No image attachments")
            return

        for upload in uploads:
            await _register_attachment(bot, config, message, upload)

        # The original user upload is replaced by the bot's per-attachment
        # replies, so try to delete it (best effort).
        try:
            await message.delete()
        except discord.Forbidden:
            logger.warning("Missing manage_messages permission to delete message %s", message.id)
        except Exception as exc:
            logger.warning("Could not delete message %s: %s", message.id, exc)

    @bot.command(
        name="sync-inputs",
        aliases=["si"],
        extras={"category": "Files"},
        help="Scan the comfy-input channel and add 'Set as input' buttons to any untracked images.",
    )
    async def sync_inputs_command(ctx: commands.Context) -> None:
        """Backfill Set-as-input buttons for images uploaded while the bot was offline."""
        if config is None:
            await ctx.reply("Bot config is not available.", mention_author=False)
            return

        channel = bot.get_channel(config.comfy_input_channel_id)
        if channel is None:
            try:
                channel = await bot.fetch_channel(config.comfy_input_channel_id)
            except Exception as exc:
                await ctx.reply(f"❌ Could not access input channel: {exc}", mention_author=False)
                return

        # Track existing records as (message_id, filename) pairs
        known = {(row["original_message_id"], row["filename"]) for row in get_all_images()}

        new_count = 0
        async for past_message in channel.history(limit=None):
            if past_message.author.bot:
                continue

            registered_any = False
            for attachment in past_message.attachments:
                if Path(attachment.filename).suffix.lower() not in IMAGE_EXTENSIONS:
                    continue
                if (past_message.id, attachment.filename) in known:
                    continue

                await _register_attachment(bot, config, past_message, attachment)
                known.add((past_message.id, attachment.filename))
                new_count += 1
                registered_any = True

            if registered_any:
                try:
                    await past_message.delete()
                except Exception as exc:
                    logger.warning("sync-inputs: could not delete message %s: %s", past_message.id, exc)

        already = len(get_all_images()) - new_count
        await ctx.reply(
            f"Synced {new_count} new image(s). {already} already known.",
            mention_author=False,
        )
|
||||
370
commands/presets.py
Normal file
370
commands/presets.py
Normal file
@@ -0,0 +1,370 @@
|
||||
"""
|
||||
commands/presets.py
|
||||
===================
|
||||
|
||||
Named workflow preset commands for the Discord ComfyUI bot.
|
||||
|
||||
A preset is a saved snapshot of the current workflow template and runtime
|
||||
state (prompt, negative_prompt, input_image, seed). Presets make it easy
|
||||
to switch between different setups (e.g. "portrait", "landscape", "anime")
|
||||
with a single command.
|
||||
|
||||
All sub-commands are accessed through the single ``ttr!preset`` command:
|
||||
|
||||
ttr!preset save <name> [description:<text>] — capture current workflow + state
|
||||
ttr!preset load <name> — restore workflow + state
|
||||
ttr!preset list — list all saved presets
|
||||
ttr!preset view <name> — show preset details
|
||||
ttr!preset delete <name> — permanently remove a preset
|
||||
ttr!preset save-last <name> [description:<text>] — save last generation as preset
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
|
||||
from discord.ext import commands
|
||||
|
||||
from discord_utils import require_comfy_client
|
||||
from preset_manager import PresetManager
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _parse_name_and_description(args: str) -> tuple[str, str | None]:
|
||||
"""
|
||||
Split ``<name> [description:<text>]`` into (name, description).
|
||||
|
||||
The name is the first whitespace-delimited token. Everything after
|
||||
``description:`` (case-insensitive) in the remaining text is the
|
||||
description. Returns (name, None) if no description keyword is found.
|
||||
"""
|
||||
parts = args.strip().split(maxsplit=1)
|
||||
name = parts[0] if parts else ""
|
||||
description: str | None = None
|
||||
if len(parts) > 1:
|
||||
rest = parts[1]
|
||||
lower = rest.lower()
|
||||
idx = lower.find("description:")
|
||||
if idx >= 0:
|
||||
description = rest[idx + len("description:"):].strip() or None
|
||||
return name, description
|
||||
|
||||
|
||||
def setup_preset_commands(bot):
    """
    Register preset commands with the bot.

    Parameters
    ----------
    bot : commands.Bot
        The Discord bot instance.
    """
    # One shared manager instance backs every sub-command.
    preset_manager = PresetManager()

    @bot.command(name="preset", extras={"category": "Presets"})
    @require_comfy_client
    async def preset_command(ctx: commands.Context, *, args: str = "") -> None:
        """
        Save, load, list, view, or delete named workflow presets.

        A preset captures the current workflow template and all runtime
        state changes (prompt, negative_prompt, input_image, seed) under a
        short name. Load it later to restore everything in one step.

        Usage:
          ttr!preset save <name> [description:<text>] — save current workflow + state
          ttr!preset load <name> — restore workflow + state
          ttr!preset list — list all saved presets
          ttr!preset view <name> — show preset details
          ttr!preset delete <name> — permanently delete a preset
          ttr!preset save-last <name> [description:<text>] — save last generation as preset

        Names may only contain letters, digits, hyphens, and underscores.

        Examples:
          ttr!preset save portrait description:studio lighting style
          ttr!preset load portrait
          ttr!preset list
          ttr!preset view portrait
          ttr!preset delete portrait
          ttr!preset save-last my-last
        """
        parts = args.strip().split(maxsplit=1)
        subcommand = parts[0].lower() if parts else ""
        rest = parts[1].strip() if len(parts) > 1 else ""
        # First token of the remainder — the preset name for single-argument
        # subcommands. Computed once here instead of re-splitting `rest`
        # twice in every branch (rest is already stripped, so non-empty rest
        # always yields a token).
        name_token = rest.split(maxsplit=1)[0] if rest else ""

        if subcommand == "save":
            name, description = _parse_name_and_description(rest)
            await _preset_save(ctx, bot, preset_manager, name, description)
        elif subcommand == "load":
            await _preset_load(ctx, bot, preset_manager, name_token)
        elif subcommand == "list":
            await _preset_list(ctx, preset_manager)
        elif subcommand == "view":
            await _preset_view(ctx, preset_manager, name_token)
        elif subcommand == "delete":
            await _preset_delete(ctx, preset_manager, name_token)
        elif subcommand == "save-last":
            name, description = _parse_name_and_description(rest)
            await _preset_save_last(ctx, preset_manager, name, description)
        else:
            # Unknown or missing subcommand — point at the full help text.
            await ctx.reply(
                "Usage: `ttr!preset <save|load|list|view|delete|save-last> [name]`\n"
                "Run `ttr!help preset` for full details.",
                mention_author=False,
            )
|
||||
|
||||
|
||||
async def _preset_save(
    ctx: commands.Context, bot, preset_manager: PresetManager, name: str,
    description: str | None = None,
) -> None:
    """Handle ttr!preset save <name> [description:<text>]."""
    # Guard clauses: a name is required and must pass validation.
    if not name:
        await ctx.reply(
            "Please provide a name. Example: `ttr!preset save portrait`",
            mention_author=False,
        )
        return

    if not PresetManager.is_valid_name(name):
        await ctx.reply(
            "Invalid name. Use only letters, digits, hyphens, and underscores (max 64 chars).",
            mention_author=False,
        )
        return

    try:
        template = bot.comfy.get_workflow_template()
        changes = bot.comfy.get_workflow_current_changes()
        preset_manager.save(name, template, changes, description=description)

        # Summarize exactly what was captured for the confirmation reply:
        # workflow template first, then each non-empty state field.
        captured: list[str] = []
        if template is not None:
            captured.append("workflow template")
        for key in ("prompt", "negative_prompt", "input_image"):
            if changes.get(key):
                captured.append(key)
        if changes.get("seed") is not None:
            captured.append(f"seed={changes['seed']}")

        summary = ", ".join(captured) if captured else "empty state"
        desc_note = f"\n> {description}" if description else ""
        await ctx.reply(
            f"Preset **{name}** saved ({summary}).{desc_note}",
            mention_author=False,
        )
    except Exception as exc:
        logger.exception("Failed to save preset '%s'", name)
        await ctx.reply(
            f"Failed to save preset: {type(exc).__name__}: {exc}",
            mention_author=False,
        )
|
||||
|
||||
|
||||
async def _preset_load(
    ctx: commands.Context, bot, preset_manager: PresetManager, name: str
) -> None:
    """
    Handle ttr!preset load <name>.

    Restores the saved workflow template (when the preset has one) and the
    saved runtime state, then replies with a summary of what was restored.
    """
    if not name:
        await ctx.reply(
            "Please provide a name. Example: `ttr!preset load portrait`",
            mention_author=False,
        )
        return

    data = preset_manager.load(name)
    if data is None:
        # Help recovery from a typo by listing the presets that do exist.
        presets = preset_manager.list_presets()
        hint = f" Available: {', '.join(presets)}" if presets else " No presets saved yet."
        await ctx.reply(
            f"Preset **{name}** not found.{hint}",
            mention_author=False,
        )
        return

    try:
        # Accumulates human-readable names of everything we restored.
        restored: list[str] = []

        # Restore workflow template if present
        workflow = data.get("workflow")
        if workflow is not None:
            bot.comfy.set_workflow(workflow)
            restored.append("workflow template")

        # Restore state changes (template first, then state — state keys
        # reference nodes in the template).
        state = data.get("state", {})
        if state:
            bot.comfy.set_workflow_current_changes(state)
            if state.get("prompt"):
                restored.append("prompt")
            if state.get("negative_prompt"):
                restored.append("negative_prompt")
            if state.get("input_image"):
                restored.append("input_image")
            if state.get("seed") is not None:
                restored.append(f"seed={state['seed']}")

        summary = ", ".join(restored) if restored else "nothing (preset was empty)"
        description = data.get("description")
        desc_note = f"\n> {description}" if description else ""
        await ctx.reply(
            f"Preset **{name}** loaded ({summary}).{desc_note}",
            mention_author=False,
        )
    except Exception as exc:
        logger.exception("Failed to load preset '%s'", name)
        await ctx.reply(
            f"Failed to load preset: {type(exc).__name__}: {exc}",
            mention_author=False,
        )
|
||||
|
||||
|
||||
async def _preset_view(
    ctx: commands.Context, preset_manager: PresetManager, name: str
) -> None:
    """Handle ttr!preset view <name>."""
    if not name:
        await ctx.reply(
            "Please provide a name. Example: `ttr!preset view portrait`",
            mention_author=False,
        )
        return

    data = preset_manager.load(name)
    if data is None:
        await ctx.reply(f"Preset **{name}** not found.", mention_author=False)
        return

    def _clip(text: str, limit: int) -> str:
        # Shorten long text with an ellipsis so the reply stays compact.
        return text if len(text) <= limit else text[:limit - 3] + "…"

    lines = [f"**Preset: {name}**"]
    if data.get("description"):
        lines.append(f"> {data['description']}")
    if data.get("owner"):
        lines.append(f"Owner: {data['owner']}")

    state = data.get("state", {})
    if state.get("prompt"):
        lines.append(f"**Prompt:** {_clip(str(state['prompt']), 200)}")
    if state.get("negative_prompt"):
        lines.append(f"**Negative:** {_clip(str(state['negative_prompt']), 100)}")
    if state.get("seed") is not None:
        seed_note = " (random)" if state["seed"] == -1 else ""
        lines.append(f"**Seed:** {state['seed']}{seed_note}")

    # Anything beyond the well-known keys is shown in a compact catch-all line.
    extras = {
        k: v for k, v in state.items()
        if k not in ("prompt", "negative_prompt", "seed", "input_image")
    }
    if extras:
        joined = ", ".join(f"{k}={v}" for k, v in extras.items())
        lines.append(f"**Other:** {joined[:200]}")

    if data.get("workflow") is not None:
        lines.append("_(includes workflow template)_")
    else:
        lines.append("_(no workflow template — load separately)_")

    await ctx.reply("\n".join(lines), mention_author=False)
|
||||
|
||||
|
||||
async def _preset_list(ctx: commands.Context, preset_manager: PresetManager) -> None:
    """Handle ttr!preset list."""
    details = preset_manager.list_preset_details()
    if not details:
        await ctx.reply(
            "No presets saved yet. Use `ttr!preset save <name>` to create one.",
            mention_author=False,
        )
        return

    # One bullet per preset, with its description appended when present.
    body = [f"**Saved presets** ({len(details)})"]
    for item in details:
        bullet = f" • {item['name']}"
        if item.get("description"):
            bullet += f" — {item['description']}"
        body.append(bullet)
    body.append("\nUse `ttr!preset load <name>` to restore one.")
    await ctx.reply("\n".join(body), mention_author=False)
|
||||
|
||||
|
||||
async def _preset_delete(
    ctx: commands.Context, preset_manager: PresetManager, name: str
) -> None:
    """Handle ttr!preset delete <name>."""
    if not name:
        await ctx.reply(
            "Please provide a name. Example: `ttr!preset delete portrait`",
            mention_author=False,
        )
        return

    # delete() reports whether anything was actually removed.
    if preset_manager.delete(name):
        await ctx.reply(f"Preset **{name}** deleted.", mention_author=False)
        return
    await ctx.reply(
        f"Preset **{name}** not found.",
        mention_author=False,
    )
|
||||
|
||||
|
||||
async def _preset_save_last(
    ctx: commands.Context, preset_manager: PresetManager, name: str,
    description: str | None = None,
) -> None:
    """
    Handle ttr!preset save-last <name> [description:<text>].

    Saves the overrides of the most recent generation-history entry as a
    preset. The workflow template is NOT captured (only overrides are
    stored), which the confirmation reply calls out explicitly.
    """
    if not name:
        await ctx.reply(
            "Please provide a name. Example: `ttr!preset save-last my-last`",
            mention_author=False,
        )
        return

    if not PresetManager.is_valid_name(name):
        await ctx.reply(
            "Invalid name. Use only letters, digits, hyphens, and underscores (max 64 chars).",
            mention_author=False,
        )
        return

    # Deferred import — presumably to avoid a circular import with
    # generation_db; verify before hoisting to module level.
    from generation_db import get_history as db_get_history
    history = db_get_history(limit=1)
    if not history:
        await ctx.reply(
            "No generation history found. Generate something first!",
            mention_author=False,
        )
        return

    last = history[0]
    # `overrides` may be missing or None in older rows — normalize to {}.
    overrides = last.get("overrides") or {}
    try:
        # workflow=None: only the runtime overrides are preserved.
        preset_manager.save(name, None, overrides, description=description)
        desc_note = f"\n> {description}" if description else ""
        await ctx.reply(
            f"Preset **{name}** saved from last generation.{desc_note}\n"
            "Note: workflow template not included — load it separately before generating.",
            mention_author=False,
        )
    except ValueError as exc:
        # Validation errors from PresetManager are user-facing — show as-is.
        await ctx.reply(str(exc), mention_author=False)
    except Exception as exc:
        logger.exception("Failed to save preset '%s' from history", name)
        await ctx.reply(
            f"Failed to save preset: {type(exc).__name__}: {exc}",
            mention_author=False,
        )
|
||||
484
commands/server.py
Normal file
484
commands/server.py
Normal file
@@ -0,0 +1,484 @@
|
||||
"""
|
||||
commands/server.py
|
||||
==================
|
||||
|
||||
ComfyUI server lifecycle management via NSSM Windows service.
|
||||
|
||||
On bot startup, `autostart_comfy()` runs as a background task:
|
||||
1. If the service does not exist, it is installed automatically.
|
||||
2. If the service exists but ComfyUI is not responding, it is started.
|
||||
|
||||
NSSM handles:
|
||||
- Background process management (no console window)
|
||||
- Stdout / stderr capture to rotating log files
|
||||
- Complete isolation from the bot's own NSSM service
|
||||
|
||||
Commands:
|
||||
ttr!server start — start the service
|
||||
ttr!server stop — stop the service
|
||||
ttr!server restart — restart the service
|
||||
ttr!server status — NSSM service state + HTTP reachability
|
||||
ttr!server install — (re)install / reconfigure the NSSM service
|
||||
ttr!server uninstall — remove the service from Windows
|
||||
|
||||
Requires:
|
||||
- nssm.exe in PATH
|
||||
- The bot service account must have permission to manage Windows services
|
||||
(Local System or a user with SeServiceLogonRight works)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
import aiohttp
|
||||
from discord.ext import commands
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Polling cadence used when waiting for ComfyUI's HTTP endpoint to come up
# after a service start.
_POLL_INTERVAL = 5  # seconds between HTTP up-checks
_MAX_ATTEMPTS = 24  # 24 × 5s = 120s max wait

# Public — imported by status_monitor for emoji rendering
# Maps NSSM SERVICE_* state strings (as returned by `nssm status`) to an
# emoji; unknown states fall back to "⚪" at the call sites.
STATUS_EMOJI: dict[str, str] = {
    "SERVICE_RUNNING": "🟢",
    "SERVICE_STOPPED": "🔴",
    "SERVICE_PAUSED": "🟡",
    "SERVICE_START_PENDING": "⏳",
    "SERVICE_STOP_PENDING": "⏳",
    "SERVICE_PAUSE_PENDING": "⏳",
    "SERVICE_CONTINUE_PENDING": "⏳",
}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Low-level subprocess helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def _nssm(*args: str) -> tuple[int, str]:
|
||||
"""Run `nssm <args>` and return (returncode, stdout)."""
|
||||
try:
|
||||
proc = await asyncio.create_subprocess_exec(
|
||||
"nssm", *args,
|
||||
stdout=asyncio.subprocess.PIPE,
|
||||
stderr=asyncio.subprocess.STDOUT,
|
||||
)
|
||||
stdout, _ = await asyncio.wait_for(proc.communicate(), timeout=30)
|
||||
return proc.returncode, stdout.decode(errors="replace").strip()
|
||||
except FileNotFoundError:
|
||||
return -1, "nssm not found — is it installed and in PATH?"
|
||||
except asyncio.TimeoutError:
|
||||
return -1, "nssm command timed out."
|
||||
except Exception as exc:
|
||||
return -1, str(exc)
|
||||
|
||||
|
||||
async def _get_service_pid(service_name: str) -> int:
    """Return the PID of the process backing *service_name*, or 0 if unavailable."""
    returncode, output = await _nssm("getpid", service_name)
    if returncode == 0:
        try:
            return int(output.strip())
        except ValueError:
            # nssm succeeded but printed something non-numeric.
            pass
    return 0
|
||||
|
||||
|
||||
async def _kill_service_process(service_name: str) -> None:
    """
    Forcefully terminate the process backing *service_name*.

    NSSM has no `kill` subcommand, so the PID is looked up via
    `nssm getpid` and handed to `taskkill /F /PID`. Calling this while
    the service is already stopped is harmless (PID 0 → nothing to do).
    """
    pid = await _get_service_pid(service_name)
    if pid == 0:
        return

    try:
        killer = await asyncio.create_subprocess_exec(
            "taskkill", "/F", "/PID", str(pid),
            stdout=asyncio.subprocess.DEVNULL,
            stderr=asyncio.subprocess.DEVNULL,
        )
        await asyncio.wait_for(killer.communicate(), timeout=10)
        logger.debug("taskkill /F /PID %d sent for service '%s'", pid, service_name)
    except Exception as exc:
        # Best-effort: a failed kill is logged, never raised to the caller.
        logger.warning("taskkill failed for PID %d (%s): %s", pid, service_name, exc)
|
||||
|
||||
|
||||
async def _is_comfy_up(server_address: str, timeout: float = 3.0) -> bool:
    """Return True if the ComfyUI HTTP endpoint is responding."""
    try:
        async with aiohttp.ClientSession() as session:
            request = session.get(
                f"http://{server_address}/system_stats",
                timeout=aiohttp.ClientTimeout(total=timeout),
            )
            async with request as resp:
                return resp.status == 200
    except Exception:
        # Any connection/timeout/protocol error means "not reachable".
        return False
|
||||
|
||||
|
||||
async def _service_exists(service_name: str) -> bool:
|
||||
"""Return True if the Windows service is installed (running or stopped)."""
|
||||
try:
|
||||
proc = await asyncio.create_subprocess_exec(
|
||||
"sc", "query", service_name,
|
||||
stdout=asyncio.subprocess.DEVNULL,
|
||||
stderr=asyncio.subprocess.DEVNULL,
|
||||
)
|
||||
await proc.communicate()
|
||||
return proc.returncode == 0
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API — used by status_monitor and other modules
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def get_service_state(service_name: str) -> str:
    """
    Return the NSSM service state string for *service_name*.

    On success this is one of the SERVICE_* keys in STATUS_EMOJI. On
    failure a sentinel — "error", "timeout", or "unknown" — is returned
    instead; callers (the status dashboard) render these rather than raise.
    """
    try:
        rc, output = await asyncio.wait_for(_nssm("status", service_name), timeout=5.0)
    except asyncio.TimeoutError:
        return "timeout"
    except Exception:
        return "error"

    if rc == -1:
        return "error"
    return output.strip() or "unknown"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Service installation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def _install_service(config) -> tuple[bool, str]:
    """
    Install the ComfyUI NSSM service with log capture and rotation.

    We install directly via python.exe (not the .bat file) to avoid the
    "Terminate batch job (Y/N)?" prompt that can cause NSSM to hang on STOP.

    Safe to call even if the service already exists — it will be removed first.

    Reads from *config*: comfy_service_name, comfy_start_bat, comfy_log_dir,
    comfy_log_max_mb, and optionally comfy_extra_args.

    Returns (success, message).
    """
    name = config.comfy_service_name
    start_bat = Path(config.comfy_start_bat)
    log_dir = Path(config.comfy_log_dir)
    log_file = str(log_dir / "comfyui.log")
    # NSSM's AppRotateBytes threshold is expressed in bytes.
    max_bytes = str(config.comfy_log_max_mb * 1024 * 1024)

    # Derive portable paths from the .bat location (ComfyUI_windows_portable root):
    #   <root>/run_nvidia_gpu.bat
    #   <root>/python_embeded/python.exe
    #   <root>/ComfyUI/main.py
    portable_root = start_bat.parent
    python_exe = portable_root / "python_embeded" / "python.exe"
    main_py = portable_root / "ComfyUI" / "main.py"

    # Fail early with a precise message for each missing prerequisite.
    if not start_bat.exists():
        return False, f"Start bat not found (used to derive paths): `{start_bat}`"
    if not python_exe.exists():
        return False, f"Portable python not found: `{python_exe}`"
    if not main_py.exists():
        return False, f"ComfyUI main.py not found: `{main_py}`"

    log_dir.mkdir(parents=True, exist_ok=True)

    # Optional extra args from config (accepts string or list/tuple)
    extra_args: list[str] = []
    extra = getattr(config, "comfy_extra_args", None)
    try:
        if isinstance(extra, (list, tuple)):
            extra_args = [str(x) for x in extra if str(x).strip()]
        elif isinstance(extra, str) and extra.strip():
            import shlex
            extra_args = shlex.split(extra)
    except Exception:
        extra_args = []  # ignore parse errors rather than aborting install

    # Remove any existing service cleanly before reinstalling
    if await _service_exists(name):
        await _nssm("stop", name)
        await _kill_service_process(name)  # force-kill if stuck in STOP_PENDING
        rc, out = await _nssm("remove", name, "confirm")
        if rc != 0:
            return False, f"Could not remove existing service: {out}"

    # nssm install <name> <python.exe> -s <main.py> --windows-standalone-build [extra]
    steps: list[tuple[str, ...]] = [
        ("install", name, str(python_exe), "-s", str(main_py), "--windows-standalone-build", *extra_args),
        ("set", name, "AppDirectory", str(portable_root)),
        ("set", name, "DisplayName", "ComfyUI Server"),
        # Capture the process's stdout and stderr into one rotating log file.
        ("set", name, "AppStdout", log_file),
        ("set", name, "AppStderr", log_file),
        ("set", name, "AppRotateFiles", "1"),
        ("set", name, "AppRotateBytes", max_bytes),
        ("set", name, "AppRotateOnline", "1"),
        # Manual start only — the bot decides when to launch ComfyUI.
        ("set", name, "Start", "SERVICE_DEMAND_START"),
        # Stop behavior — prevent NSSM from hanging indefinitely
        ("set", name, "AppKillProcessTree", "1"),
        ("set", name, "AppStopMethodConsole", "1500"),
        ("set", name, "AppStopMethodWindow", "1500"),
        ("set", name, "AppStopMethodThreads", "1500"),
    ]

    for step in steps:
        rc, out = await _nssm(*step)
        if rc != 0:
            # The first three tokens identify which nssm invocation failed.
            return False, f"`nssm {' '.join(step[:3])}` failed: {out}"

    return True, f"Service `{name}` installed. Log: `{log_file}`"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Autostart (called from bot.py on_ready)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def autostart_comfy(config) -> None:
    """
    Ensure ComfyUI is running when the bot starts.

    1. Install the NSSM service if it is missing.
    2. Start the service if ComfyUI is not already responding.

    Does nothing if config.comfy_autostart is False. Intended to run as a
    background task from on_ready — every failure is logged, never raised.
    """
    if not getattr(config, "comfy_autostart", True):
        return

    if not await _service_exists(config.comfy_service_name):
        logger.info("NSSM service '%s' not found — installing", config.comfy_service_name)
        ok, msg = await _install_service(config)
        if not ok:
            logger.error("Failed to install ComfyUI service: %s", msg)
            return
        logger.info("ComfyUI service installed: %s", msg)

    # Already reachable over HTTP — nothing more to do.
    if await _is_comfy_up(config.comfy_server):
        logger.info("ComfyUI already running at %s", config.comfy_server)
        return

    logger.info("Starting NSSM service '%s'", config.comfy_service_name)
    rc, out = await _nssm("start", config.comfy_service_name)
    if rc != 0:
        logger.warning("nssm start returned %d: %s", rc, out)
        return

    # Poll the HTTP endpoint until it responds or we give up (~120s total).
    for attempt in range(_MAX_ATTEMPTS):
        await asyncio.sleep(_POLL_INTERVAL)
        if await _is_comfy_up(config.comfy_server):
            logger.info("ComfyUI is up after ~%ds", (attempt + 1) * _POLL_INTERVAL)
            return

    logger.warning(
        "ComfyUI did not respond within %ds after service start",
        _MAX_ATTEMPTS * _POLL_INTERVAL,
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Discord commands
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def setup_server_commands(bot, config=None):
|
||||
"""Register ComfyUI server management commands."""
|
||||
|
||||
def _no_config(ctx):
|
||||
"""Reply and return True when config is missing (guards every subcommand)."""
|
||||
return config is None
|
||||
|
||||
@bot.group(name="server", invoke_without_command=True, extras={"category": "Server"})
|
||||
async def server_group(ctx: commands.Context) -> None:
|
||||
"""ComfyUI server management. Subcommands: start, stop, restart, status, install, uninstall."""
|
||||
await ctx.send_help(ctx.command)
|
||||
|
||||
@server_group.command(name="start")
|
||||
async def server_start(ctx: commands.Context) -> None:
|
||||
"""Start the ComfyUI service."""
|
||||
if config is None:
|
||||
await ctx.reply("Bot config not available.", mention_author=False)
|
||||
return
|
||||
|
||||
if await _is_comfy_up(config.comfy_server):
|
||||
await ctx.reply("✅ ComfyUI is already running.", mention_author=False)
|
||||
return
|
||||
|
||||
msg = await ctx.reply(
|
||||
f"⏳ Starting service `{config.comfy_service_name}`…", mention_author=False
|
||||
)
|
||||
rc, out = await _nssm("start", config.comfy_service_name)
|
||||
if rc != 0:
|
||||
await msg.edit(content=f"❌ `{out}`")
|
||||
return
|
||||
|
||||
await msg.edit(content="⏳ Waiting for ComfyUI to respond…")
|
||||
for attempt in range(_MAX_ATTEMPTS):
|
||||
await asyncio.sleep(_POLL_INTERVAL)
|
||||
if await _is_comfy_up(config.comfy_server):
|
||||
await msg.edit(
|
||||
content=f"✅ ComfyUI is up! (took ~{(attempt + 1) * _POLL_INTERVAL}s)"
|
||||
)
|
||||
return
|
||||
|
||||
await msg.edit(content="⚠️ Service started but ComfyUI did not respond within 120 seconds.")
|
||||
|
||||
@server_group.command(name="stop")
|
||||
async def server_stop(ctx: commands.Context) -> None:
|
||||
"""Stop the ComfyUI service (force-kills if graceful stop fails)."""
|
||||
if config is None:
|
||||
await ctx.reply("Bot config not available.", mention_author=False)
|
||||
return
|
||||
|
||||
msg = await ctx.reply(
|
||||
f"⏳ Stopping service `{config.comfy_service_name}`…", mention_author=False
|
||||
)
|
||||
rc, out = await _nssm("stop", config.comfy_service_name)
|
||||
if rc == 0:
|
||||
await msg.edit(content="✅ ComfyUI service stopped.")
|
||||
return
|
||||
|
||||
# Graceful stop failed (timed out or error) — force-kill the process.
|
||||
await msg.edit(content="⏳ Graceful stop failed — force-killing process…")
|
||||
await _kill_service_process(config.comfy_service_name)
|
||||
await asyncio.sleep(2)
|
||||
|
||||
state = await get_service_state(config.comfy_service_name)
|
||||
if state == "SERVICE_STOPPED":
|
||||
await msg.edit(content="✅ ComfyUI service force-killed and stopped.")
|
||||
else:
|
||||
await msg.edit(
|
||||
content=f"⚠️ Force-kill sent but service state is `{state}`. "
|
||||
f"Use `ttr!server kill` to try again."
|
||||
)
|
||||
|
||||
@server_group.command(name="kill")
|
||||
async def server_kill(ctx: commands.Context) -> None:
|
||||
"""Force-kill the ComfyUI process when it is stuck in STOPPING/STOP_PENDING."""
|
||||
if config is None:
|
||||
await ctx.reply("Bot config not available.", mention_author=False)
|
||||
return
|
||||
|
||||
msg = await ctx.reply(
|
||||
f"⏳ Force-killing `{config.comfy_service_name}` process…", mention_author=False
|
||||
)
|
||||
await _kill_service_process(config.comfy_service_name)
|
||||
await asyncio.sleep(2)
|
||||
|
||||
state = await get_service_state(config.comfy_service_name)
|
||||
emoji = STATUS_EMOJI.get(state, "⚪")
|
||||
await msg.edit(
|
||||
content=f"💀 taskkill sent. Service state is now {emoji} `{state}`."
|
||||
)
|
||||
|
||||
@server_group.command(name="restart")
|
||||
async def server_restart(ctx: commands.Context) -> None:
|
||||
"""Restart the ComfyUI service (force-kills if graceful stop fails)."""
|
||||
if config is None:
|
||||
await ctx.reply("Bot config not available.", mention_author=False)
|
||||
return
|
||||
|
||||
msg = await ctx.reply(
|
||||
f"⏳ Stopping `{config.comfy_service_name}` for restart…", mention_author=False
|
||||
)
|
||||
|
||||
# Step 1: graceful stop.
|
||||
rc, out = await _nssm("stop", config.comfy_service_name)
|
||||
if rc != 0:
|
||||
# Stop timed out or failed — force-kill so we can start fresh.
|
||||
await msg.edit(content="⏳ Graceful stop failed — force-killing process…")
|
||||
await _kill_service_process(config.comfy_service_name)
|
||||
await asyncio.sleep(2)
|
||||
|
||||
# Step 2: verify stopped before starting.
|
||||
state = await get_service_state(config.comfy_service_name)
|
||||
if state not in ("SERVICE_STOPPED", "error", "unknown", "timeout"):
|
||||
# Still not fully stopped — try one more force-kill.
|
||||
await _kill_service_process(config.comfy_service_name)
|
||||
await asyncio.sleep(2)
|
||||
|
||||
# Step 3: start.
|
||||
await msg.edit(content=f"⏳ Starting `{config.comfy_service_name}`…")
|
||||
rc, out = await _nssm("start", config.comfy_service_name)
|
||||
if rc != 0:
|
||||
await msg.edit(content=f"❌ Start failed: `{out}`")
|
||||
return
|
||||
|
||||
# Step 4: wait for HTTP.
|
||||
await msg.edit(content="⏳ Waiting for ComfyUI to come back up…")
|
||||
for attempt in range(_MAX_ATTEMPTS):
|
||||
await asyncio.sleep(_POLL_INTERVAL)
|
||||
if await _is_comfy_up(config.comfy_server):
|
||||
await msg.edit(
|
||||
content=f"✅ ComfyUI is back up! (took ~{(attempt + 1) * _POLL_INTERVAL}s)"
|
||||
)
|
||||
return
|
||||
|
||||
await msg.edit(content="⚠️ Service started but ComfyUI did not respond within 120 seconds.")
|
||||
|
||||
@server_group.command(name="status")
async def server_status(ctx: commands.Context) -> None:
    """Show NSSM service state and HTTP reachability."""
    if config is None:
        await ctx.reply("Bot config not available.", mention_author=False)
        return

    # Query the Windows service manager and the HTTP endpoint concurrently.
    state, http_up = await asyncio.gather(
        get_service_state(config.comfy_service_name),
        _is_comfy_up(config.comfy_server),
    )

    service_line = f"{STATUS_EMOJI.get(state, '⚪')} `{state}`"
    if http_up:
        http_line = f"🟢 Responding at `{config.comfy_server}`"
    else:
        http_line = f"🔴 Not responding at `{config.comfy_server}`"

    await ctx.reply(
        f"**ComfyUI Server Status**\n"
        f"Service `{config.comfy_service_name}`: {service_line}\n"
        f"HTTP: {http_line}",
        mention_author=False,
    )
|
||||
|
||||
@server_group.command(name="install")
async def server_install(ctx: commands.Context) -> None:
    """(Re)install the ComfyUI NSSM service with current config settings."""
    if config is None:
        await ctx.reply("Bot config not available.", mention_author=False)
        return

    progress = await ctx.reply(
        f"⏳ Installing service `{config.comfy_service_name}`…", mention_author=False
    )
    ok, detail = await _install_service(config)
    # One final edit reports success or failure with the helper's detail text.
    outcome = "✅" if ok else "❌"
    await progress.edit(content=f"{outcome} {detail}")
|
||||
|
||||
@server_group.command(name="uninstall")
async def server_uninstall(ctx: commands.Context) -> None:
    """Stop and remove the ComfyUI NSSM service from Windows."""
    if config is None:
        await ctx.reply("Bot config not available.", mention_author=False)
        return

    svc = config.comfy_service_name
    progress = await ctx.reply(
        f"⏳ Removing service `{svc}`…", mention_author=False
    )
    # Make sure nothing is running before asking NSSM to drop the entry.
    await _nssm("stop", svc)
    await _kill_service_process(svc)
    remove_rc, remove_out = await _nssm("remove", svc, "confirm")
    if remove_rc == 0:
        await progress.edit(content=f"✅ Service `{svc}` removed.")
    else:
        await progress.edit(content=f"❌ `{remove_out}`")
|
||||
268
commands/utility.py
Normal file
268
commands/utility.py
Normal file
@@ -0,0 +1,268 @@
|
||||
"""
|
||||
commands/utility.py
|
||||
===================
|
||||
|
||||
Quality-of-life utility commands for the Discord ComfyUI bot.
|
||||
|
||||
Commands provided:
|
||||
- ping: Show bot latency (Discord WebSocket round-trip).
|
||||
- status: Full overview of bot health, ComfyUI connectivity,
|
||||
workflow state, and queue.
|
||||
- queue-status: Quick view of pending job count and worker state.
|
||||
- uptime: How long the bot has been running since it connected.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from discord.ext import commands
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _format_uptime(start_time: datetime) -> str:
|
||||
"""Return a human-readable uptime string from a UTC start datetime."""
|
||||
delta = datetime.now(timezone.utc) - start_time
|
||||
total_seconds = int(delta.total_seconds())
|
||||
days, remainder = divmod(total_seconds, 86400)
|
||||
hours, remainder = divmod(remainder, 3600)
|
||||
minutes, seconds = divmod(remainder, 60)
|
||||
if days:
|
||||
return f"{days}d {hours}h {minutes}m {seconds}s"
|
||||
if hours:
|
||||
return f"{hours}h {minutes}m {seconds}s"
|
||||
return f"{minutes}m {seconds}s"
|
||||
|
||||
|
||||
def setup_utility_commands(bot):
|
||||
"""
|
||||
Register quality-of-life utility commands with the bot.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
bot : commands.Bot
|
||||
The Discord bot instance.
|
||||
"""
|
||||
|
||||
@bot.command(name="ping", extras={"category": "Utility"})
async def ping_command(ctx: commands.Context) -> None:
    """
    Show the bot's current Discord WebSocket latency.

    Usage:
        ttr!ping
    """
    # bot.latency is seconds; report whole milliseconds.
    await ctx.reply(
        f"Pong! Latency: **{round(bot.latency * 1000)} ms**", mention_author=False
    )
|
||||
|
||||
@bot.command(name="status", extras={"category": "Utility"})
async def status_command(ctx: commands.Context) -> None:
    """
    Show a full health overview of the bot and ComfyUI.

    Displays:
    - Bot latency and uptime
    - ComfyUI server address and reachability
    - Whether a workflow template is loaded
    - Current workflow changes (prompt / negative_prompt / input_image)
    - Job queue size and worker state

    Usage:
        ttr!status
    """
    latency_ms = round(bot.latency * 1000)

    # Uptime since the bot last connected (if bot.start_time is tracked).
    start = getattr(bot, "start_time", None)
    uptime_str = _format_uptime(start) if start else "N/A"

    # ComfyUI client / connectivity details.
    client = getattr(bot, "comfy", None)
    comfy_ok = client is not None
    comfy_server = client.server_address if comfy_ok else "not configured"
    comfy_reachable = await client.check_connection() if comfy_ok else False
    workflow_loaded = comfy_ok and client.get_workflow_template() is not None

    # Server-side queue depth.
    comfy_running = 0
    comfy_pending = 0
    if comfy_ok:
        queue_data = await client.get_comfy_queue()
        if queue_data:
            comfy_running = len(queue_data.get("queue_running", []))
            comfy_pending = len(queue_data.get("queue_pending", []))

    # Summarize which workflow overrides are currently set.
    changes_parts: list[str] = []
    if comfy_ok:
        overrides = client.state_manager.get_overrides()
        if overrides.get("prompt"):
            changes_parts.append("prompt")
        if overrides.get("negative_prompt"):
            changes_parts.append("negative_prompt")
        if overrides.get("input_image"):
            changes_parts.append(f"input_image: {overrides['input_image']}")
        # seed may legitimately be 0, so test for None rather than truthiness.
        if overrides.get("seed") is not None:
            changes_parts.append(f"seed={overrides['seed']}")
    changes_summary = ", ".join(changes_parts) if changes_parts else "none"

    if not comfy_ok:
        conn_status = "not configured"
    elif comfy_reachable:
        conn_status = "reachable"
    else:
        conn_status = "unreachable"

    lines = [
        "**Bot**",
        f" Latency : {latency_ms} ms",
        f" Uptime : {uptime_str}",
        "",
        f"**ComfyUI** — `{comfy_server}`",
        f" Connection : {conn_status}",
        f" Queue : {comfy_running} running, {comfy_pending} pending",
        f" Workflow : {'loaded' if workflow_loaded else 'not loaded'}",
        f" Changes set : {changes_summary}",
    ]
    await ctx.reply("\n".join(lines), mention_author=False)
|
||||
|
||||
@bot.command(name="queue-status", aliases=["qs", "qstatus"], extras={"category": "Utility"})
async def queue_status_command(ctx: commands.Context) -> None:
    """
    Show the current ComfyUI queue depth.

    Usage:
        ttr!queue-status
        ttr!qs
    """
    client = getattr(bot, "comfy", None)
    if not client:
        await ctx.reply("ComfyUI client is not configured.", mention_author=False)
        return

    queue_data = await client.get_comfy_queue()
    if queue_data is None:
        await ctx.reply("Could not reach ComfyUI server.", mention_author=False)
        return

    running = len(queue_data.get("queue_running", []))
    pending = len(queue_data.get("queue_pending", []))
    await ctx.reply(
        f"ComfyUI queue: **{running}** running, **{pending}** pending.",
        mention_author=False,
    )
|
||||
|
||||
@bot.command(name="uptime", extras={"category": "Utility"})
async def uptime_command(ctx: commands.Context) -> None:
    """
    Show how long the bot has been running since it last connected.

    Usage:
        ttr!uptime
    """
    start = getattr(bot, "start_time", None)
    if not start:
        await ctx.reply("Uptime information is not available.", mention_author=False)
        return
    await ctx.reply(f"Uptime: **{_format_uptime(start)}**", mention_author=False)
|
||||
|
||||
@bot.command(name="comfy-stats", aliases=["cstats"], extras={"category": "Utility"})
async def comfy_stats_command(ctx: commands.Context) -> None:
    """
    Show GPU and system stats from the ComfyUI server.

    Displays OS, Python version, and per-device VRAM usage reported
    by the ComfyUI ``/system_stats`` endpoint.

    Usage:
        ttr!comfy-stats
        ttr!cstats
    """
    if not hasattr(bot, "comfy") or not bot.comfy:
        await ctx.reply("ComfyUI client is not configured.", mention_author=False)
        return

    stats = await bot.comfy.get_system_stats()
    if stats is None:
        await ctx.reply(
            "Could not reach the ComfyUI server to fetch stats.", mention_author=False
        )
        return

    def _mb(num_bytes: int) -> str:
        # Format a raw byte count as whole mebibytes.
        # Fix: previously this helper was re-defined inside the device loop
        # on every iteration; hoisted so it is created once per invocation.
        return f"{num_bytes / 1024 / 1024:.0f} MB"

    system = stats.get("system", {})
    devices = stats.get("devices", [])

    lines = [
        f"**ComfyUI System Stats** — `{bot.comfy.server_address}`",
        f" OS : {system.get('os', 'N/A')}",
        f" Python : {system.get('python_version', 'N/A')}",
    ]

    if devices:
        lines.append("")
        lines.append("**Devices**")
        for dev in devices:
            name = dev.get("name", "unknown")
            # Missing fields default to 0 so used = total - free stays an int.
            vram_total = dev.get("vram_total", 0)
            vram_free = dev.get("vram_free", 0)
            vram_used = vram_total - vram_free
            lines.append(
                f" {name} — {_mb(vram_used)} / {_mb(vram_total)} VRAM used"
            )
    else:
        lines.append(" No device info available.")

    await ctx.reply("\n".join(lines), mention_author=False)
|
||||
|
||||
@bot.command(name="comfy-queue", aliases=["cqueue", "cq"], extras={"category": "Utility"})
async def comfy_queue_command(ctx: commands.Context) -> None:
    """
    Show the ComfyUI server's internal queue state.

    Displays jobs currently running and pending on the ComfyUI server
    itself (separate from the Discord bot's own job queue).

    Usage:
        ttr!comfy-queue
        ttr!cq
    """
    client = getattr(bot, "comfy", None)
    if not client:
        await ctx.reply("ComfyUI client is not configured.", mention_author=False)
        return

    queue_data = await client.get_comfy_queue()
    if queue_data is None:
        await ctx.reply(
            "Could not reach the ComfyUI server to fetch queue info.", mention_author=False
        )
        return

    running = queue_data.get("queue_running", [])
    pending = queue_data.get("queue_pending", [])

    lines = [
        f"**ComfyUI Server Queue** — `{client.server_address}`",
        f" Running : {len(running)} job(s)",
        f" Pending : {len(pending)} job(s)",
    ]

    def _append_ids(entries):
        # Queue entries are lists; index 1 holds the prompt id when present.
        # Cap at 5 entries to avoid huge Discord messages.
        for entry in entries[:5]:
            pid = entry[1] if len(entry) > 1 else "unknown"
            lines.append(f" `{pid}`")

    if running:
        lines.append("")
        lines.append("**Currently running**")
        _append_ids(running)

    if pending:
        lines.append("")
        lines.append(f"**Pending** (showing up to 5 of {len(pending)})")
        _append_ids(pending)

    await ctx.reply("\n".join(lines), mention_author=False)
|
||||
100
commands/workflow.py
Normal file
100
commands/workflow.py
Normal file
@@ -0,0 +1,100 @@
|
||||
"""
|
||||
commands/workflow.py
|
||||
====================
|
||||
|
||||
Workflow management commands for the Discord ComfyUI bot.
|
||||
|
||||
This module contains commands for loading and managing workflow templates.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional, Dict
|
||||
|
||||
from discord.ext import commands
|
||||
|
||||
from discord_utils import require_comfy_client
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def setup_workflow_commands(bot):
    """
    Register workflow management commands with the bot.

    Parameters
    ----------
    bot : commands.Bot
        The Discord bot instance.
    """

    @bot.command(name="workflow-load", aliases=["workflowload", "wfl"], extras={"category": "Workflow"})
    @require_comfy_client
    async def load_workflow_command(ctx: commands.Context, *, path: Optional[str] = None) -> None:
        """
        Load a ComfyUI workflow from a JSON file.

        Usage:
            ttr!workflow-load path/to/workflow.json

        You can also attach a JSON file to the command message instead of
        providing a path. The loaded workflow replaces the current workflow
        template used by the bot; after loading, generations reuse the
        loaded graph structure with your prompts.
        """
        workflow_data: Optional[Dict] = None

        # Prefer the first attached .json file over a filesystem path.
        attachment = next(
            (a for a in ctx.message.attachments if a.filename.lower().endswith(".json")),
            None,
        )
        if attachment is not None:
            raw = await attachment.read()
            try:
                text = raw.decode("utf-8")
            except UnicodeDecodeError as exc:
                await ctx.reply(
                    f"`{attachment.filename}` is not valid UTF-8: {exc}",
                    mention_author=False,
                )
                return
            try:
                workflow_data = json.loads(text)
            except json.JSONDecodeError as exc:
                await ctx.reply(
                    f"Failed to parse `{attachment.filename}` as JSON: {exc}",
                    mention_author=False,
                )
                return

        # Fall back to reading the workflow from a local path.
        if workflow_data is None and path:
            try:
                with open(path, "r", encoding="utf-8") as f:
                    workflow_data = json.load(f)
            except FileNotFoundError:
                await ctx.reply(f"File not found: `{path}`", mention_author=False)
                return
            except json.JSONDecodeError as exc:
                await ctx.reply(f"Invalid JSON in `{path}`: {exc}", mention_author=False)
                return

        if workflow_data is None:
            await ctx.reply(
                "Please provide a JSON workflow file either as an attachment or a path.",
                mention_author=False,
            )
            return

        # Hand the parsed graph to the ComfyUI client.
        try:
            bot.comfy.set_workflow(workflow_data)
            await ctx.reply("Workflow loaded successfully.", mention_author=False)
        except Exception as exc:
            await ctx.reply(
                f"Failed to set workflow: {type(exc).__name__}: {exc}",
                mention_author=False,
            )
|
||||
252
commands/workflow_changes.py
Normal file
252
commands/workflow_changes.py
Normal file
@@ -0,0 +1,252 @@
|
||||
"""
|
||||
commands/workflow_changes.py
|
||||
============================
|
||||
|
||||
Workflow override management commands for the Discord ComfyUI bot.
|
||||
|
||||
Works with any NodeInput.key discovered by WorkflowInspector — not just
|
||||
the four original hard-coded keys. Backward-compat aliases are preserved:
|
||||
``type:prompt``, ``type:negative_prompt``, ``type:input_image``,
|
||||
``type:seed``.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
|
||||
from discord.ext import commands
|
||||
|
||||
from config import ARG_TYPE_KEY
|
||||
from discord_utils import require_comfy_client
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def setup_workflow_changes_commands(bot):
|
||||
"""Register workflow changes commands with the bot."""
|
||||
|
||||
@bot.command(
    name="get-current-workflow-changes",
    aliases=["getworkflowchanges", "gcwc"],
    extras={"category": "Workflow"},
)
@require_comfy_client
async def get_current_workflow_changes_command(
    ctx: commands.Context, *, args: str = ""
) -> None:
    """
    Show current workflow override values.

    Usage::

        ttr!get-current-workflow-changes type:all
        ttr!get-current-workflow-changes type:prompt
        ttr!get-current-workflow-changes type:<any_override_key>
    """
    try:
        overrides = bot.comfy.state_manager.get_overrides()

        if ARG_TYPE_KEY not in args:
            await ctx.reply(
                f"Use `{ARG_TYPE_KEY}all` to see all overrides, or "
                f"`{ARG_TYPE_KEY}<key>` for a specific key.",
                mention_author=False,
            )
            return

        param = args.split(ARG_TYPE_KEY, 1)[1].strip().lower()

        if param == "all":
            if not overrides:
                await ctx.reply("No overrides set.", mention_author=False)
                return
            summary = "\n".join(
                f"**{k}**: `{v}`" for k, v in sorted(overrides.items())
            )
            await ctx.reply(
                "Current overrides:\n" + summary,
                mention_author=False,
            )
            return

        # Tolerate trailing words after the key — only the first token counts.
        key = param.split()[0] if " " in param else param
        val = overrides.get(key)
        if val is None:
            await ctx.reply(
                f"Override `{key}` is not set.",
                mention_author=False,
            )
        else:
            await ctx.reply(
                f"**{key}**: `{val}`",
                mention_author=False,
            )
    except Exception as exc:
        logger.exception("Failed to get workflow overrides")
        await ctx.reply(f"An error occurred: {type(exc).__name__}: {exc}", mention_author=False)
|
||||
|
||||
@bot.command(
    name="set-current-workflow-changes",
    aliases=["setworkflowchanges", "scwc"],
    extras={"category": "Workflow"},
)
@require_comfy_client
async def set_current_workflow_changes_command(
    ctx: commands.Context, *, args: str = ""
) -> None:
    """
    Set a workflow override value.

    Supports any NodeInput.key discovered by WorkflowInspector as well
    as the legacy fixed keys.

    Usage::

        ttr!set-current-workflow-changes type:<key> <value>

    Examples::

        ttr!scwc type:prompt A beautiful landscape
        ttr!scwc type:negative_prompt blurry
        ttr!scwc type:input_image my_image.png
        ttr!scwc type:steps 30
        ttr!scwc type:cfg 7.5
        ttr!scwc type:seed 42
    """
    try:
        if not args or ARG_TYPE_KEY not in args:
            await ctx.reply(
                f"Usage: `ttr!set-current-workflow-changes {ARG_TYPE_KEY}<key> <value>`",
                mention_author=False,
            )
            return

        # Everything after the type marker: "<key> <value...>".
        remainder = args.split(ARG_TYPE_KEY, 1)[1]
        pieces = remainder.split(None, 1)
        if len(pieces) < 2:
            await ctx.reply(
                "Please provide both a key and a value. "
                f"Example: `ttr!scwc {ARG_TYPE_KEY}prompt A cat`",
                mention_author=False,
            )
            return

        key = pieces[0].strip().lower()
        raw_value: str = pieces[1].strip()

        if not key:
            await ctx.reply("Key cannot be empty.", mention_author=False)
            return

        # Coerce well-known numeric keys so downstream nodes receive numbers,
        # not strings. Seeds coerce with int() just like the other int keys.
        int_like = {"steps", "width", "height", "seed", "noise_seed"}
        float_like = {"cfg", "denoise"}

        value: object = raw_value
        try:
            if key in int_like:
                value = int(raw_value)
            elif key in float_like:
                value = float(raw_value)
        except ValueError:
            await ctx.reply(
                f"Invalid value for `{key}`: expected a number, got `{raw_value}`.",
                mention_author=False,
            )
            return

        bot.comfy.state_manager.set_override(key, value)
        await ctx.reply(
            f"Override **{key}** set to `{value}`.",
            mention_author=False,
        )
    except Exception as exc:
        logger.exception("Failed to set workflow override")
        await ctx.reply(f"An error occurred: {type(exc).__name__}: {exc}", mention_author=False)
|
||||
|
||||
@bot.command(
    name="clear-workflow-change",
    aliases=["clearworkflowchange", "cwc"],
    extras={"category": "Workflow"},
)
@require_comfy_client
async def clear_workflow_change_command(
    ctx: commands.Context, *, args: str = ""
) -> None:
    """
    Remove a single override key.

    Usage::

        ttr!clear-workflow-change type:<key>
    """
    try:
        if ARG_TYPE_KEY not in args:
            await ctx.reply(
                f"Usage: `ttr!clear-workflow-change {ARG_TYPE_KEY}<key>`",
                mention_author=False,
            )
            return
        target = args.split(ARG_TYPE_KEY, 1)[1].strip().lower()
        bot.comfy.state_manager.delete_override(target)
        await ctx.reply(f"Override **{target}** cleared.", mention_author=False)
    except Exception as exc:
        logger.exception("Failed to clear override")
        await ctx.reply(f"An error occurred: {type(exc).__name__}: {exc}", mention_author=False)
|
||||
|
||||
@bot.command(
    name="set-seed",
    aliases=["setseed"],
    extras={"category": "Workflow"},
)
@require_comfy_client
async def set_seed_command(ctx: commands.Context, *, args: str = "") -> None:
    """
    Pin a specific seed for deterministic generation.

    Usage::

        ttr!set-seed 42
    """
    raw = args.strip()
    if not raw:
        await ctx.reply("Usage: `ttr!set-seed <number>`", mention_author=False)
        return
    # isdigit() rejects signs and decimals, so only non-negative ints pass.
    if not raw.isdigit():
        await ctx.reply("Seed must be a non-negative integer.", mention_author=False)
        return
    seed = int(raw)
    # NOTE(review): cap is 2**32 - 1 here; confirm ComfyUI doesn't accept
    # wider (64-bit) seeds before relying on this bound.
    max_seed = 2 ** 32 - 1
    if seed > max_seed:
        await ctx.reply(f"Seed must be between 0 and {max_seed}.", mention_author=False)
        return
    try:
        bot.comfy.state_manager.set_seed(seed)
        await ctx.reply(f"Seed pinned to `{seed}`.", mention_author=False)
    except Exception as exc:
        logger.exception("Failed to set seed")
        await ctx.reply(f"An error occurred: {type(exc).__name__}: {exc}", mention_author=False)
|
||||
|
||||
@bot.command(
    name="clear-seed",
    aliases=["clearseed"],
    extras={"category": "Workflow"},
)
@require_comfy_client
async def clear_seed_command(ctx: commands.Context) -> None:
    """
    Clear the pinned seed and return to random generation.

    Usage::

        ttr!clear-seed
    """
    try:
        bot.comfy.state_manager.clear_seed()
        await ctx.reply("Seed cleared; generation will now use random seeds.", mention_author=False)
    except Exception as exc:
        logger.exception("Failed to clear seed")
        await ctx.reply(f"An error occurred: {type(exc).__name__}: {exc}", mention_author=False)
|
||||
Reference in New Issue
Block a user