apix/services/crawl4ai/app/models.py
Khoa.vo 0f87b8ef99
Some checks are pending
CI / build (18.x) (push) Waiting to run
CI / build (20.x) (push) Waiting to run
feat: add Meta AI video generation
- Add /video/generate endpoint to crawl4ai Python service
- Add VideoGenerateRequest and VideoGenerateResponse models
- Add generateVideo method to MetaCrawlClient TypeScript client
- Add /api/meta/video Next.js API route
- Add 'Video' button in PromptHero UI (visible only for Meta AI provider)
- Blue/cyan gradient styling for Video button to differentiate from Generate
2026-01-06 13:52:31 +07:00

87 lines
2.4 KiB
Python

"""
Pydantic models for request/response schemas
"""
from pydantic import BaseModel, Field
from typing import Optional
from enum import Enum
class TaskStatus(str, Enum):
    """Lifecycle states of an async generation task.

    Inherits from str so values serialize as plain strings in JSON
    responses (e.g. in TaskStatusResponse.status).
    """
    PENDING = "pending"        # queued, not yet started
    PROCESSING = "processing"  # actively generating
    COMPLETED = "completed"    # finished successfully
    FAILED = "failed"          # terminated with an error
class GenerateRequest(BaseModel):
    """Request model for image generation.

    Sent by the client to start a Meta AI "imagine" image run;
    authentication is carried via raw session cookies rather than a token.
    """
    prompt: str = Field(..., description="Image generation prompt", min_length=1)
    cookies: str = Field(..., description="Meta AI session cookies")
    # bounded 1-8; upstream service presumably rejects larger batches — TODO confirm
    num_images: int = Field(default=4, ge=1, le=8, description="Number of images to generate")
class GrokChatRequest(BaseModel):
    """Request model for Grok chat.

    All fields except the message are optional; history and cookies are
    untyped containers (list/dict), so their internal shape is validated
    downstream, not here.
    """
    message: str = Field(..., description="Message content")
    history: Optional[list] = Field(default=None, description="Chat history")
    cookies: Optional[dict] = Field(default=None, description="Grok session cookies")
    # allows the caller to spoof a specific browser when crawling — TODO confirm
    user_agent: Optional[str] = Field(default=None, description="Browser User-Agent")
class ImageResult(BaseModel):
    """Single generated image result.

    Carries the remote URL and optionally the inlined image bytes, plus
    the prompt that produced it for traceability.
    """
    url: str                      # remote location of the generated image
    data: Optional[str] = None    # base64 encoded image data
    prompt: str                   # prompt that produced this image
    model: str = "imagine"        # generator identifier; defaults to Meta "imagine"
class GenerateResponse(BaseModel):
    """Response model for image generation.

    On success, ``images`` holds the generated results; on failure,
    ``success`` is False and ``error`` carries a human-readable message.
    ``task_id`` is set when the request was handled asynchronously.
    """
    success: bool
    # default_factory is the idiomatic Pydantic way to declare a mutable
    # default, and is consistent with the explicit Field(...) usage in the
    # request models above.
    images: list[ImageResult] = Field(default_factory=list)
    error: Optional[str] = None
    task_id: Optional[str] = None
class GrokChatResponse(BaseModel):
    """Response model for Grok chat.

    ``response`` always carries the assistant text (possibly empty);
    ``error`` is populated when the chat call failed.
    """
    response: str               # assistant reply text
    error: Optional[str] = None # error message, if the call failed
class TaskStatusResponse(BaseModel):
    """Response model for async task status.

    Polled by clients to track an image-generation task. ``images`` is
    populated once the task reaches COMPLETED; ``error`` is set on FAILED.
    """
    task_id: str
    status: TaskStatus
    # default_factory is the idiomatic Pydantic way to declare a mutable
    # default, matching the explicit Field(...) usage in the request models.
    images: list[ImageResult] = Field(default_factory=list)
    error: Optional[str] = None
    progress: Optional[int] = None  # 0-100
class HealthResponse(BaseModel):
    """Health check response.

    Defaults describe the healthy steady state; the handler presumably
    overrides ``browser_ready`` when the crawler browser is down — TODO confirm.
    """
    status: str = "healthy"     # service liveness indicator
    version: str = "1.0.0"      # service version string
    browser_ready: bool = True  # whether the headless browser is available
class VideoGenerateRequest(BaseModel):
    """Request model for video generation.

    Mirrors GenerateRequest but without a count parameter — one video
    per request.
    """
    prompt: str = Field(..., description="Video generation prompt", min_length=1)
    cookies: str = Field(..., description="Meta AI session cookies")
class VideoResult(BaseModel):
    """Single generated video result.

    Unlike ImageResult there is no inline-data field: videos are
    referenced by URL only.
    """
    url: str                  # remote location of the generated video
    prompt: str               # prompt that produced this video
    model: str = "meta_video" # generator identifier
class VideoGenerateResponse(BaseModel):
    """Response model for video generation.

    On success, ``videos`` holds the generated results; on failure,
    ``success`` is False and ``error`` carries a human-readable message.
    ``conversation_id`` identifies the upstream Meta AI conversation,
    when one was created.
    """
    success: bool
    # default_factory is the idiomatic Pydantic way to declare a mutable
    # default, matching the explicit Field(...) usage in the request models.
    videos: list[VideoResult] = Field(default_factory=list)
    error: Optional[str] = None
    conversation_id: Optional[str] = None