# Changelog (V3):
# - Add Python FastAPI backend with Pydantic validation
# - Port WhiskClient and MetaAIClient to Python
# - Create API routers for all endpoints
# - Add Swagger/ReDoc documentation at /docs
# - Update Dockerfile for multi-service container
# - Add lib/api.ts frontend client
# - Update README for V3
"""
|
|
Prompts Router - Prompt library management
|
|
"""
import asyncio
import time
from typing import Optional

from fastapi import APIRouter, HTTPException

from models.requests import PromptUseRequest, PromptUploadRequest
from models.responses import PromptCache, SyncResponse, ErrorResponse
from services.prompts_service import (
    get_prompts,
    sync_prompts,
    track_prompt_use,
    upload_prompt_image,
)

# Router for all /prompts endpoints; mounted by the app with the
# "/prompts" prefix and grouped under the "Prompts" tag in the API docs.
router = APIRouter(prefix="/prompts", tags=["Prompts"])
@router.get(
    "",
    response_model=PromptCache,
    responses={500: {"model": ErrorResponse}}
)
async def list_prompts():
    """
    Get all prompts from the library.

    Returns cached prompts with metadata including:
    - All prompts with titles, descriptions, and content
    - Categories and sources
    - Last sync timestamp

    Triggers background sync if last sync was more than 1 hour ago.

    Raises:
        HTTPException: 500 if the prompt cache cannot be loaded.
    """
    try:
        cache = await get_prompts()

        # Lazy Auto-Crawl: re-sync at most once per hour. last_sync is
        # stored in milliseconds since the epoch (JS-style timestamp),
        # hence the * 1000 conversion of time.time() below.
        ONE_HOUR_MS = 60 * 60 * 1000
        last_sync = cache.last_sync or 0

        if int(time.time() * 1000) - last_sync > ONE_HOUR_MS:
            print("[Auto-Crawl] Triggering background sync...")
            # Fire and forget - don't await. Keep a module-level reference
            # so the task is not garbage-collected before it completes
            # (asyncio only holds a weak reference to created tasks).
            global _background_sync_task
            _background_sync_task = asyncio.create_task(sync_prompts())

        return cache.to_dict()

    except Exception as e:
        # Log the underlying error before mapping it to a generic 500;
        # previously it was silently discarded, hiding the real cause.
        print(f"Failed to load prompts: {e}")
        raise HTTPException(status_code=500, detail="Failed to load prompts")
|
@router.post(
    "/sync",
    response_model=SyncResponse,
    responses={500: {"model": ErrorResponse}}
)
async def sync_prompts_endpoint():
    """
    Manually trigger a sync of prompts from all sources.

    Crawls prompt sources and merges with existing prompts.
    Returns count of total and newly added prompts.

    Raises:
        HTTPException: 500 if the sync fails for any reason.
    """
    try:
        # Build the response model straight from the service result.
        return SyncResponse(**(await sync_prompts()))
    except Exception as exc:
        print(f"Sync failed: {exc}")
        raise HTTPException(status_code=500, detail="Sync failed")
@router.post(
    "/use",
    responses={
        404: {"model": ErrorResponse},
        500: {"model": ErrorResponse}
    }
)
async def use_prompt(request: PromptUseRequest):
    """
    Track usage of a prompt.

    Increments the use count and updates lastUsedAt timestamp.

    Raises:
        HTTPException: 404 if the prompt does not exist, 500 on any
            other failure.
    """
    try:
        tracked = await track_prompt_use(request.promptId)
        if not tracked:
            raise HTTPException(status_code=404, detail="Prompt not found")

        payload = {
            "success": True,
            "promptId": tracked.id,
            "useCount": tracked.use_count,
            "lastUsedAt": tracked.last_used_at,
        }
        return payload
    except HTTPException:
        # Re-raise our own 404 untouched.
        raise
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
|
@router.post(
    "/upload",
    responses={
        404: {"model": ErrorResponse},
        500: {"model": ErrorResponse}
    }
)
async def upload_image(request: PromptUploadRequest):
    """
    Upload a thumbnail image for a prompt.

    Stores the base64 image data with the prompt.

    Raises:
        HTTPException: 404 if the prompt does not exist, 500 on any
            other failure.
    """
    try:
        updated = await upload_prompt_image(request.promptId, request.imageBase64)
        if not updated:
            raise HTTPException(status_code=404, detail="Prompt not found")

        image_total = len(updated.images) if updated.images else 0
        return {
            "success": True,
            "promptId": updated.id,
            "imageCount": image_total,
        }
    except HTTPException:
        # Re-raise our own 404 untouched.
        raise
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
@router.post(
    "/generate",
    responses={
        400: {"model": ErrorResponse},
        404: {"model": ErrorResponse},
        500: {"model": ErrorResponse}
    }
)
async def generate_from_prompt(
    promptId: int,
    aspectRatio: str = "1:1",
    cookies: Optional[str] = None  # was `str = None`: invalid annotation
):
    """
    Generate images using a prompt from the library.

    This is a convenience endpoint that:
    1. Fetches the prompt by ID
    2. Calls the generate endpoint with the prompt content

    NOTE(review): aspectRatio and cookies are currently unused here — the
    frontend performs the actual generation with the returned prompt.
    Both are kept for interface compatibility; confirm before removing.

    Raises:
        HTTPException: 404 if the prompt does not exist, 500 on any
            other failure.
    """
    try:
        cache = await get_prompts()

        # Find the prompt by ID; next() replaces the manual search loop.
        prompt = next((p for p in cache.prompts if p.id == promptId), None)
        if not prompt:
            raise HTTPException(status_code=404, detail="Prompt not found")

        # Track usage
        await track_prompt_use(promptId)

        # Return prompt info for frontend to generate
        return {
            "success": True,
            "prompt": prompt.to_dict()
        }

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))