Initial commit — ComfyUI Discord bot + web UI

Full source for the-third-rev: Discord bot (discord.py), FastAPI web UI
(React/TS/Vite/Tailwind), ComfyUI integration, generation history DB,
preset manager, workflow inspector, and all supporting modules.

Excluded from tracking: .env, invite_tokens.json, *.db (SQLite),
current-workflow-changes.json, user_settings/, presets/, logs/,
web-static/ (build output), frontend/node_modules/.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Khoa (Revenovich) Tran Gia
2026-03-02 09:55:48 +07:00
commit 1ed3c9ec4b
82 changed files with 20693 additions and 0 deletions

389
commands/generation.py Normal file
View File

@@ -0,0 +1,389 @@
"""
commands/generation.py
======================
Image and video generation commands for the Discord ComfyUI bot.
Jobs are submitted directly to ComfyUI (no internal SerialJobQueue).
ComfyUI's own queue handles ordering. Each Discord command waits for its
prompt_id to complete via WebSocket and then replies with the result.
"""
from __future__ import annotations
import asyncio
import logging
try:
import aiohttp # type: ignore
except Exception: # pragma: no cover
aiohttp = None # type: ignore
from io import BytesIO
from pathlib import Path
from typing import Optional
import discord
from discord.ext import commands
from config import ARG_PROMPT_KEY, ARG_NEG_PROMPT_KEY, ARG_QUEUE_KEY, MAX_IMAGES_PER_RESPONSE
from discord_utils import require_comfy_client, convert_image_bytes_to_discord_files
from media_uploader import flush_pending
logger = logging.getLogger(__name__)
# Windows socket error codes that indicate a transient network failure.
_TRANSIENT_WINERRORS = {64, 121, 1231, 10053, 10054}


def _looks_transient(exc: Exception) -> bool:
    """Return True when *exc* is a network/Discord error worth retrying."""
    if isinstance(exc, asyncio.TimeoutError):
        return True
    if isinstance(exc, OSError) and getattr(exc, "winerror", None) in _TRANSIENT_WINERRORS:
        return True
    if aiohttp is not None:
        try:
            if isinstance(exc, (aiohttp.ClientOSError, aiohttp.ClientConnectionError)):
                return True
        except Exception:
            pass
    if isinstance(exc, discord.HTTPException):
        # Server-side (5xx), rate-limit (429), or status-less failures
        # are retryable; client errors (4xx) are not.
        status = getattr(exc, "status", None)
        if status is None or status >= 500 or status == 429:
            return True
    return False


async def _safe_reply(
    ctx: commands.Context,
    *,
    content: str | None = None,
    files: list[discord.File] | None = None,
    mention_author: bool = True,
    delete_after: float | None = None,
    tries: int = 4,
    base_delay: float = 1.0,
):
    """Send ``ctx.reply`` with exponential-backoff retries on transient errors.

    Attempts the reply up to *tries* times, doubling the wait between
    attempts starting from *base_delay* seconds.  Errors classified as
    transient by :func:`_looks_transient` are retried; anything else
    (or exhaustion of attempts) re-raises the original exception.
    """
    wait = base_delay
    failure: Exception | None = None
    for attempt in range(1, tries + 1):
        try:
            return await ctx.reply(
                content=content,
                files=files or [],
                mention_author=mention_author,
                delete_after=delete_after,
            )
        except Exception as exc:  # noqa: BLE001
            failure = exc
            if attempt == tries or not _looks_transient(exc):
                raise
            logger.warning(
                "Transient error sending Discord message (attempt %d/%d): %s: %s",
                attempt, tries, type(exc).__name__, exc,
            )
            await asyncio.sleep(wait)
            wait *= 2
    raise failure  # type: ignore[misc]
def _seed_line(bot) -> str:
    """Format the most recently tracked seed as a trailing message line.

    Reads ``bot.comfy.last_seed``; returns an empty string when no seed
    has been tracked, otherwise a newline-prefixed ``Seed:`` line with
    the value in inline code markup.
    """
    tracked = getattr(bot.comfy, "last_seed", None)
    if tracked is None:
        return ""
    return f"\nSeed: `{tracked}`"
async def _run_generate(ctx: commands.Context, bot, prompt_text: str, negative_text: Optional[str]):
    """Execute a prompt-based generation and reply with results.

    Submits *prompt_text* (and optional *negative_text*) to ComfyUI via
    ``bot.comfy.generate_image``, then replies in the invoking channel
    with up to ``MAX_IMAGES_PER_RESPONSE`` image attachments, and finally
    schedules a background upload of pending output files.
    """
    images, prompt_id = await bot.comfy.generate_image(
        prompt_text, negative_text,
        source="discord", user_label=ctx.author.display_name,
    )
    if not images:
        # Plain reply (no retry wrapper): small text-only message.
        await ctx.reply(
            "No images were generated. Please try again with a different prompt.",
            mention_author=False,
        )
        return
    # Wrap raw image bytes as discord.File attachments, capped per message.
    files = convert_image_bytes_to_discord_files(
        images, max_files=MAX_IMAGES_PER_RESPONSE, prefix="generated"
    )
    response_text = f"Generated {len(images)} image(s). Prompt ID: `{prompt_id}`{_seed_line(bot)}"
    # Use the retrying sender: attachment uploads are the failure-prone path.
    await _safe_reply(ctx, content=response_text, files=files, mention_author=True)
    # Fire-and-forget upload of newly produced output files.
    # NOTE(review): no reference to this task is kept; per the asyncio docs
    # an unreferenced task may be garbage-collected before completion —
    # confirm flush_pending reliably finishes or retain a reference.
    asyncio.create_task(flush_pending(
        Path(bot.config.comfy_output_path),
        bot.config.media_upload_user,
        bot.config.media_upload_pass,
    ))
async def _run_workflow(ctx: commands.Context, bot, config):
    """Execute a workflow-based generation and reply with results.

    Runs the currently loaded workflow via
    ``bot.comfy.generate_image_with_workflow``.  When the workflow
    produced videos, the first readable video file (loaded from ComfyUI's
    output directory) is attached; otherwise the generated images are
    attached.  Ends by scheduling a background media upload.
    """
    logger.info("Executing workflow generation")
    # Short-lived acknowledgement; auto-deleted after 5 seconds.
    await ctx.reply("Executing workflow…", mention_author=False, delete_after=5.0)
    images, videos, prompt_id = await bot.comfy.generate_image_with_workflow(
        source="discord", user_label=ctx.author.display_name,
    )
    if not images and not videos:
        await ctx.reply(
            "No images or videos were generated. Check the workflow and ComfyUI logs.",
            mention_author=False,
        )
        return
    seed_info = _seed_line(bot)
    if videos:
        # Videos are read back from disk; only the first readable entry
        # is attached to the reply.
        output_path = config.comfy_output_path
        video_file = None
        for video_info in videos:
            video_name = video_info.get("video_name")
            video_subfolder = video_info.get("video_subfolder", "")
            if video_name:
                video_path = (
                    Path(output_path) / video_subfolder / video_name
                    if video_subfolder
                    else Path(output_path) / video_name
                )
                try:
                    video_file = discord.File(
                        BytesIO(video_path.read_bytes()), filename=video_name
                    )
                    break
                except Exception as exc:
                    # Log and fall through to try the next video entry.
                    logger.exception("Failed to read video %s: %s", video_path, exc)
        if video_file:
            response_text = (
                f"Generated {len(images)} image(s) and a video. "
                f"Prompt ID: `{prompt_id}`{seed_info}"
            )
            await _safe_reply(ctx, content=response_text, files=[video_file], mention_author=True)
        else:
            # Generation succeeded but every video file failed to read.
            await ctx.reply(
                f"Generated output but failed to read video file. "
                f"Prompt ID: `{prompt_id}`{seed_info}",
                mention_author=True,
            )
    else:
        files = convert_image_bytes_to_discord_files(
            images, max_files=MAX_IMAGES_PER_RESPONSE, prefix="generated"
        )
        response_text = (
            f"Generated {len(images)} image(s) using workflow. "
            f"Prompt ID: `{prompt_id}`{seed_info}"
        )
        await _safe_reply(ctx, content=response_text, files=files, mention_author=True)
    # Fire-and-forget upload of new output files.
    # NOTE(review): the task reference is not retained; asyncio may GC an
    # unreferenced task — confirm flush_pending completes reliably.
    asyncio.create_task(flush_pending(
        Path(config.comfy_output_path),
        config.media_upload_user,
        config.media_upload_pass,
    ))
def setup_generation_commands(bot, config):
    """Register generation commands with the bot.

    Registers ``test``, ``generate``, ``workflow-gen``, ``rerun`` and
    ``cancel`` on *bot*.  *config* supplies the ComfyUI output path and
    media-upload credentials used by the workflow helpers.

    Fix: removed the unused ``ack`` local in ``generate`` (the queued
    acknowledgement message was assigned but never referenced).
    """
    @bot.command(name="test", extras={"category": "Generation"})
    async def test_command(ctx: commands.Context) -> None:
        """A simple test command to verify the bot is working."""
        await ctx.reply(
            "The bot is working! Use `ttr!generate` to create images.",
            mention_author=False,
        )

    @bot.command(name="generate", aliases=["gen"], extras={"category": "Generation"})
    @require_comfy_client
    async def generate(ctx: commands.Context, *, args: str = "") -> None:
        """
        Generate images using ComfyUI.
        Usage::
            ttr!generate prompt:<your prompt> negative_prompt:<your negatives>
        The ``prompt:`` keyword is required. ``negative_prompt:`` is optional.
        """
        prompt_text: Optional[str] = None
        negative_text: Optional[str] = None
        if args:
            if ARG_PROMPT_KEY in args:
                # Take everything after "prompt:", then optionally split
                # off the "negative_prompt:" section.
                parts = args.split(ARG_PROMPT_KEY, 1)[1]
                if ARG_NEG_PROMPT_KEY in parts:
                    p, n = parts.split(ARG_NEG_PROMPT_KEY, 1)
                    prompt_text = p.strip()
                    negative_text = n.strip() or None
                else:
                    prompt_text = parts.strip()
            else:
                # No keyword at all: treat the whole argument string as the prompt.
                prompt_text = args.strip()
        if not prompt_text:
            await ctx.reply(
                f"Please specify a prompt: `{ARG_PROMPT_KEY}<your prompt>`.",
                mention_author=False,
            )
            return
        # Remember parameters so ttr!rerun can repeat this generation.
        bot.last_gen = {"mode": "prompt", "prompt": prompt_text, "negative": negative_text}
        try:
            # Show queue position from ComfyUI before waiting
            depth = await bot.comfy.get_queue_depth()
            pos = depth + 1
            await ctx.reply(
                f"Queued ✅ (ComfyUI position: ~{pos})",
                mention_author=False,
                delete_after=30.0,
            )
            await _run_generate(ctx, bot, prompt_text, negative_text)
        except Exception as exc:
            logger.exception("Error generating image")
            await ctx.reply(
                f"An error occurred: {type(exc).__name__}: {exc}",
                mention_author=False,
            )

    @bot.command(
        name="workflow-gen",
        aliases=["workflow-generate", "wfg"],
        extras={"category": "Generation"},
    )
    @require_comfy_client
    async def generate_workflow_command(ctx: commands.Context, *, args: str = "") -> None:
        """
        Generate using the currently loaded workflow template.
        Usage::
            ttr!workflow-gen
            ttr!workflow-gen queue:<number>
        """
        bot.last_gen = {"mode": "workflow", "prompt": None, "negative": None}
        # Handle batch queue parameter
        if ARG_QUEUE_KEY in args:
            number_part = args.split(ARG_QUEUE_KEY, 1)[1].strip()
            if number_part.isdigit():
                queue_times = int(number_part)
                if queue_times > 1:
                    await ctx.reply(
                        f"Queuing {queue_times} workflow runs…",
                        mention_author=False,
                    )
                    # Run sequentially; each iteration reports its own
                    # queue position and failures do not abort the batch.
                    for i in range(queue_times):
                        try:
                            depth = await bot.comfy.get_queue_depth()
                            pos = depth + 1
                            await ctx.reply(
                                f"Queued run {i+1}/{queue_times} ✅ (ComfyUI position: ~{pos})",
                                mention_author=False,
                                delete_after=30.0,
                            )
                            await _run_workflow(ctx, bot, config)
                        except Exception as exc:
                            logger.exception("Error on workflow run %d", i + 1)
                            await ctx.reply(
                                f"Error on run {i+1}: {type(exc).__name__}: {exc}",
                                mention_author=False,
                            )
                    return
                else:
                    # queue:0 / queue:1 is rejected; a single run is just
                    # plain ttr!workflow-gen.
                    await ctx.reply(
                        "Please provide a number greater than 1 for queueing multiple runs.",
                        mention_author=False,
                        delete_after=30.0,
                    )
                    return
            else:
                await ctx.reply(
                    f"Invalid queue parameter. Use `{ARG_QUEUE_KEY}<number>`.",
                    mention_author=False,
                    delete_after=30.0,
                )
                return
        try:
            depth = await bot.comfy.get_queue_depth()
            pos = depth + 1
            await ctx.reply(
                f"Queued ✅ (ComfyUI position: ~{pos})",
                mention_author=False,
                delete_after=30.0,
            )
            await _run_workflow(ctx, bot, config)
        except Exception as exc:
            logger.exception("Error generating with workflow")
            await ctx.reply(
                f"An error occurred: {type(exc).__name__}: {exc}",
                mention_author=False,
            )

    @bot.command(name="rerun", aliases=["rr"], extras={"category": "Generation"})
    @require_comfy_client
    async def rerun_command(ctx: commands.Context) -> None:
        """
        Re-run the last generation with the same parameters.
        Re-submits the most recent ``ttr!generate`` or ``ttr!workflow-gen``
        with the same mode and prompt. Current state overrides (seed,
        input_image, etc.) are applied at execution time.
        """
        last = getattr(bot, "last_gen", None)
        if last is None:
            await ctx.reply(
                "No previous generation to rerun.",
                mention_author=False,
            )
            return
        try:
            depth = await bot.comfy.get_queue_depth()
            pos = depth + 1
            await ctx.reply(
                f"Rerun queued ✅ (ComfyUI position: ~{pos})",
                mention_author=False,
                delete_after=30.0,
            )
            # Dispatch on the stored mode; workflow mode ignores prompt fields.
            if last["mode"] == "prompt":
                await _run_generate(ctx, bot, last["prompt"], last["negative"])
            else:
                await _run_workflow(ctx, bot, config)
        except Exception as exc:
            logger.exception("Error queueing rerun")
            await ctx.reply(
                f"An error occurred: {type(exc).__name__}: {exc}",
                mention_author=False,
            )

    @bot.command(name="cancel", extras={"category": "Generation"})
    @require_comfy_client
    async def cancel_command(ctx: commands.Context) -> None:
        """
        Clear all pending jobs from the ComfyUI queue.
        Usage::
            ttr!cancel
        """
        try:
            ok = await bot.comfy.clear_queue()
            if ok:
                await ctx.reply("ComfyUI queue cleared.", mention_author=False)
            else:
                await ctx.reply(
                    "Failed to clear the ComfyUI queue (server may have returned an error).",
                    mention_author=False,
                )
        except Exception as exc:
            await ctx.reply(f"Error: {exc}", mention_author=False)