# NOTE: the lines below were pasted in from a commit message / file-viewer
# header; preserved as a comment so the module remains valid Python.
#
# Changelog (commit message):
# - Add Python FastAPI backend with Pydantic validation
# - Port WhiskClient and MetaAIClient to Python
# - Create API routers for all endpoints
# - Add Swagger/ReDoc documentation at /docs
# - Update Dockerfile for multi-service container
# - Add lib/api.ts frontend client
# - Update README for V3
"""Pydantic response models for the FastAPI backend.

These models define the JSON shapes returned by the API endpoints
(image/video generation, Meta AI results, the prompt library, upload
history, and the standard error envelope).
"""

from pydantic import BaseModel
from typing import Optional, List, Dict, Any
class GeneratedImage(BaseModel):
    """A single generated image."""

    data: str  # base64-encoded image payload
    index: Optional[int] = None  # position within the generated batch, when known
    prompt: str  # prompt that produced this image
    # NOTE(review): camelCase presumably mirrors the frontend JSON contract — confirm
    aspectRatio: str
class GenerateResponse(BaseModel):
    """Response from image generation: the batch of generated images."""

    images: List[GeneratedImage]
class VideoResponse(BaseModel):
    """Response from video generation.

    The optional fields are filled in as the (possibly asynchronous) job
    progresses; only ``success`` is always present.
    """

    success: bool
    id: Optional[str] = None  # generation job / resource identifier, if assigned
    url: Optional[str] = None  # URL of the finished video, once available
    status: Optional[str] = None  # backend-reported job status, if provided
class ReferenceUploadResponse(BaseModel):
    """Response from a reference-image upload."""

    success: bool
    id: str  # identifier of the stored reference, used by later generation calls
class MetaImageResult(BaseModel):
    """A single Meta AI generated image.

    Either ``data`` (inline base64) or ``url`` (remote location) may be
    populated depending on how the backend delivered the result.
    """

    data: Optional[str] = None  # base64-encoded image payload, if returned inline
    url: Optional[str] = None  # remote URL of the image, if returned by reference
    prompt: str  # prompt that produced this image
    model: str  # Meta AI model identifier used for generation
    aspectRatio: str = "1:1"  # defaults to square output
class MetaGenerateResponse(BaseModel):
    """Response from Meta AI image generation."""

    success: bool
    images: List[MetaImageResult]
class MetaVideoResult(BaseModel):
    """A single Meta AI video result."""

    url: str  # location of the generated video
    prompt: str  # prompt that produced this video
class MetaVideoResponse(BaseModel):
    """Response from Meta AI video generation."""

    success: bool
    videos: List[MetaVideoResult]
    # conversation id lets the client continue the same Meta AI session, if any
    conversation_id: Optional[str] = None
class Prompt(BaseModel):
    """A prompt-library item.

    NOTE(review): field naming mixes snake_case (``source_url``) and
    camelCase (``useCount``) — presumably frozen to match the stored JSON
    and the frontend client; confirm before renaming.
    """

    id: int
    title: str
    description: str
    prompt: str  # the prompt text itself
    category: str
    source: str  # where the prompt came from
    source_url: str
    images: Optional[List[str]] = None  # example images for this prompt, if any
    useCount: int = 0  # how many times this prompt has been used
    lastUsedAt: Optional[int] = None  # timestamp of last use, if ever used
    createdAt: Optional[int] = None  # creation timestamp, if recorded
class PromptCache(BaseModel):
    """On-disk/in-memory cache of the prompt library.

    The mutable ``{}`` / ``[]`` defaults are safe on a Pydantic model:
    Pydantic deep-copies field defaults for each new instance, so they are
    not shared across instances like plain-class defaults would be.
    """

    prompts: List[Prompt]
    last_updated: Optional[str] = None  # human-readable update marker, if set
    lastSync: Optional[int] = None  # timestamp of the last sync, if any
    categories: Dict[str, List[str]] = {}  # category name -> prompt titles/ids
    total_count: int = 0
    sources: List[str] = []  # distinct prompt sources present in the cache
class SyncResponse(BaseModel):
    """Response from a prompt-library sync."""

    success: bool
    count: int  # total prompts after the sync
    added: int  # prompts newly added by this sync
class HistoryItem(BaseModel):
    """A single upload-history entry."""

    id: str
    url: str  # where the uploaded file can be retrieved
    originalName: str  # filename as provided by the uploader
    category: str
    mediaId: Optional[str] = None  # backend media identifier, if assigned
    createdAt: Optional[int] = None  # upload timestamp, if recorded
class HistoryResponse(BaseModel):
    """Response from the upload-history endpoint."""

    history: List[HistoryItem]
class ErrorResponse(BaseModel):
    """Standard error envelope returned by all endpoints on failure."""

    error: str  # short machine/human-readable error summary
    details: Optional[str] = None  # optional longer explanation