chore: Remove Grok integration, simplify Settings UI
Some checks are pending
CI / build (18.x) (push) Waiting to run
CI / build (20.x) (push) Waiting to run

- Removed all Grok-related code, API routes, and services
- Removed crawl4ai service and meta-crawl client
- Simplified Settings to always show cookie inputs for Meta AI
- Hid advanced wrapper settings behind collapsible section
- Provider selection now only shows Whisk and Meta AI
- Fixed unused imports and type definitions
This commit is contained in:
Khoa.vo 2026-01-07 19:21:51 +07:00
parent 537b1b80e5
commit e69c6ba64d
42 changed files with 2892 additions and 4778 deletions

View file

@ -1,51 +0,0 @@
import { NextRequest, NextResponse } from 'next/server';
const CRAWL_SERVICE_URL = 'http://127.0.0.1:8000';

/**
 * POST handler: proxies a Grok chat request to the local crawl service.
 *
 * Expects a JSON body with `message`, `history`, and `cookies`; forwards
 * them to the service's /grok/chat endpoint and relays the JSON response
 * (or a structured error) back to the caller.
 */
export async function POST(req: NextRequest) {
  try {
    const body = await req.json();
    const { message, history } = body;

    // Log a redacted summary only: `cookies` are session credentials and
    // must never be written to server logs.
    console.log(
      `[Grok API] Incoming request:`,
      JSON.stringify(
        { message, historyLength: Array.isArray(history) ? history.length : 0 },
        null,
        2
      )
    );

    const proxyPayload = {
      message,
      history,
      cookies: body.cookies
    };

    const response = await fetch(`${CRAWL_SERVICE_URL}/grok/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify(proxyPayload),
    });

    if (!response.ok) {
      const errorText = await response.text();
      console.error(`[Grok API] Service error: ${response.status} ${errorText}`);
      // Prefer relaying the service's structured JSON error; fall back to
      // wrapping the raw text when it is not valid JSON.
      try {
        const errorJson = JSON.parse(errorText);
        return NextResponse.json(errorJson, { status: response.status });
      } catch {
        return NextResponse.json(
          { error: `Service error: ${response.status} - ${errorText}` },
          { status: response.status }
        );
      }
    }

    const data = await response.json();
    return NextResponse.json(data);
  } catch (error: unknown) {
    console.error('[Grok API] Proxy error:', error);
    const message = error instanceof Error ? error.message : 'Internal Server Error';
    return NextResponse.json(
      { error: message },
      { status: 500 }
    );
  }
}

104
app/api/grok-image/route.ts Normal file
View file

@ -0,0 +1,104 @@
import { NextRequest, NextResponse } from 'next/server';
/**
* Grok Image Generation API
* Uses xLmiler/grok2api_python backend with OpenAI-compatible format
*/
/**
 * POST handler for Grok image generation.
 *
 * Calls an xLmiler/grok2api_python backend through its OpenAI-compatible
 * chat-completions endpoint. The backend embeds image links in the
 * assistant message text; they are parsed out and returned as
 * { images: [{ url, prompt, model }] }.
 */
export async function POST(req: NextRequest) {
  try {
    const body = await req.json();
    // NOTE: `sso` was previously destructured here but never used; removed.
    const { prompt, numImages = 1, grokApiUrl, apiKey } = body;

    if (!prompt) {
      return NextResponse.json({ error: "Prompt is required" }, { status: 400 });
    }
    if (!grokApiUrl) {
      return NextResponse.json({ error: "Grok API URL not configured" }, { status: 400 });
    }

    console.log(`[Grok Image] Generating ${numImages} image(s) for: "${prompt.substring(0, 50)}..."`);

    // Call xLmiler backend using OpenAI-compatible format
    const headers: Record<string, string> = {
      'Content-Type': 'application/json',
    };
    if (apiKey) {
      headers['Authorization'] = `Bearer ${apiKey}`;
    }

    const response = await fetch(`${grokApiUrl}/v1/chat/completions`, {
      method: 'POST',
      headers,
      body: JSON.stringify({
        model: 'grok-4-imageGen',
        messages: [
          {
            role: 'user',
            content: prompt
          }
        ],
        // The xLmiler backend handles image generation via chat completions
      })
    });

    if (!response.ok) {
      const errorText = await response.text();
      console.error('[Grok Image] Error:', response.status, errorText);
      return NextResponse.json(
        { error: `Grok API Error: ${response.status} - ${errorText.substring(0, 200)}` },
        { status: response.status }
      );
    }

    const data = await response.json();
    console.log('[Grok Image] Response:', JSON.stringify(data, null, 2).substring(0, 500));

    // xLmiler returns images in the message content (markdown or bare URLs)
    const content = data.choices?.[0]?.message?.content || '';
    const imageUrls = extractImageUrls(content);

    // Return images in our standard format
    const images = imageUrls.slice(0, numImages).map(url => ({
      url,
      prompt,
      model: 'grok-4-imageGen'
    }));

    if (images.length === 0) {
      // Return the raw content for debugging
      return NextResponse.json({
        error: "No images found in response",
        rawContent: content.substring(0, 500)
      }, { status: 500 });
    }

    return NextResponse.json({ images });
  } catch (error: unknown) {
    console.error('[Grok Image] Error:', error);
    const message = error instanceof Error ? error.message : 'Generation failed';
    return NextResponse.json(
      { error: message },
      { status: 500 }
    );
  }
}

/**
 * Extract image URLs from chat-completion text.
 *
 * Collects markdown-image targets `![alt](url)` first, then bare URLs
 * ending in a known image extension, skipping bare URLs that were already
 * collected. (Markdown matches are not de-duplicated, mirroring the
 * original inline logic.)
 */
function extractImageUrls(content: string): string[] {
  const urls: string[] = [];

  // Match markdown image syntax: ![...](https://...)
  const mdImageRegex = /!\[.*?\]\((https?:\/\/[^\s)]+)\)/g;
  let match;
  while ((match = mdImageRegex.exec(content)) !== null) {
    urls.push(match[1]);
  }

  // Also match direct URLs with image-file extensions
  const urlRegex = /https:\/\/[^\s"'<>]+\.(png|jpg|jpeg|webp|gif)/gi;
  while ((match = urlRegex.exec(content)) !== null) {
    if (!urls.includes(match[0])) {
      urls.push(match[0]);
    }
  }

  return urls;
}

View file

@ -1,60 +0,0 @@
import { NextRequest, NextResponse } from 'next/server';
import { GrokClient } from '@/lib/providers/grok-client';
/**
 * POST handler for Grok image generation via GrokClient.
 *
 * Requires either an API key or session cookies in the request body.
 * Downloads generated images as base64 so the caller can persist them
 * without re-fetching remote URLs.
 */
export async function POST(req: NextRequest) {
  try {
    const { prompt, apiKey, cookies, imageCount = 1 } = await req.json();

    if (!prompt) {
      return NextResponse.json({ error: "Prompt is required" }, { status: 400 });
    }
    if (!apiKey && !cookies) {
      return NextResponse.json(
        { error: "Grok API key or cookies required. Configure in Settings." },
        { status: 401 }
      );
    }

    console.log(`[Grok API Route] Generating ${imageCount} image(s) for: "${prompt.substring(0, 30)}..."`);

    const client = new GrokClient({ apiKey, cookies });
    const results = await client.generate(prompt, imageCount);

    // Download images as base64 for storage. A failed download is
    // non-fatal: the original URL is still returned for that image.
    const images = await Promise.all(
      results.map(async (img) => {
        let base64 = img.data;
        if (!base64 && img.url && !img.url.startsWith('data:')) {
          try {
            base64 = await client.downloadAsBase64(img.url);
          } catch (e) {
            console.warn("[Grok API Route] Failed to download image:", e);
          }
        }
        return {
          data: base64 || '',
          url: img.url,
          prompt: img.prompt,
          model: img.model,
          aspectRatio: '1:1' // Grok default
        };
      })
    );

    // Keep only entries that carry either inline data or a URL.
    const validImages = images.filter(img => img.data || img.url);
    if (validImages.length === 0) {
      throw new Error("No valid images generated");
    }

    return NextResponse.json({ images: validImages });
  } catch (error: unknown) {
    console.error("[Grok API Route] Error:", error);
    const message = error instanceof Error ? error.message : "Grok generation failed";
    return NextResponse.json(
      { error: message },
      { status: 500 }
    );
  }
}

208
app/api/meta-chat/route.ts Normal file
View file

@ -0,0 +1,208 @@
import { NextRequest, NextResponse } from 'next/server';
import { MetaAIClient } from '@/lib/providers/meta-client';
/**
* Meta AI Chat API
* Uses MetaAIClient directly (GraphQL-based) for Llama 3 chat
* No external Crawl4AI service needed
*/
/**
 * POST handler for Meta AI chat.
 *
 * Uses the GraphQL flow in sendMetaChatMessage (no external Crawl4AI
 * service). The Meta AI API is primarily built for image generation;
 * sending the prompt without the "Imagine" prefix yields a text reply.
 * Auth-related failures are surfaced as 401 so the UI can prompt for
 * fresh cookies.
 */
export async function POST(req: NextRequest) {
  try {
    const body = await req.json();
    // NOTE: `history` is accepted in the body but currently unused.
    const { message, metaCookies } = body;

    if (!message) {
      return NextResponse.json({ error: "Message is required" }, { status: 400 });
    }
    if (!metaCookies) {
      return NextResponse.json(
        { error: "Meta AI cookies required. Configure in Settings." },
        { status: 401 }
      );
    }

    console.log(`[Meta Chat] Message: "${message.substring(0, 50)}..."`);

    const client = new MetaAIClient({ cookies: metaCookies });

    try {
      const response = await sendMetaChatMessage(client, message, metaCookies);
      return NextResponse.json({ response });
    } catch (chatError: unknown) {
      // Normalize so the outer handler always has a useful message.
      const msg = chatError instanceof Error ? chatError.message : '';
      throw new Error(msg || "Failed to get response from Meta AI");
    }
  } catch (error: unknown) {
    console.error('[Meta Chat] Error:', error);
    const msg = error instanceof Error ? error.message : "";
    // Cookie/auth problems map to 401; everything else is a 500.
    const isAuthError = msg.includes("401") || msg.includes("cookies") ||
      msg.includes("expired") || msg.includes("Login");
    return NextResponse.json(
      { error: msg || 'Internal Server Error' },
      { status: isAuthError ? 401 : 500 }
    );
  }
}
/**
* Send a chat message to Meta AI and extract text response
*/
/**
 * Send a single chat message to Meta AI over its public GraphQL endpoint
 * and return the bot's text reply.
 *
 * Flow: (1) fetch the meta.ai landing page with the user's cookies to
 * scrape an LSD token and access token, (2) POST the message via the
 * useAbraSendMessageMutation GraphQL call, (3) extract the reply text
 * from the response.
 *
 * @param client  MetaAIClient instance.
 *                NOTE(review): currently unused — this function talks to
 *                the GraphQL endpoint directly; kept for call-site
 *                compatibility. Confirm whether it can be dropped.
 * @param message Plain-text user message.
 * @param cookies Meta AI cookies, either a header string or a JSON-array
 *                export (normalized below).
 * @throws Error when cookies are expired, the service returns an error
 *         page, or the response cannot be parsed.
 */
async function sendMetaChatMessage(client: MetaAIClient, message: string, cookies: string): Promise<string> {
  const META_AI_BASE = "https://www.meta.ai";
  const GRAPHQL_ENDPOINT = `${META_AI_BASE}/api/graphql/`;

  // Normalize cookies from JSON array to string format
  const normalizedCookies = normalizeCookies(cookies);

  // First we need to get session tokens from the landing page HTML.
  const sessionResponse = await fetch(META_AI_BASE, {
    headers: {
      "Cookie": normalizedCookies,
      "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
    }
  });

  const html = await sessionResponse.text();

  // Extract LSD token — several patterns, since different page builds
  // embed it in different places.
  const lsdMatch = html.match(/\"LSD\",\[\],\{\"token\":\"([^\"]+)\"/) ||
    html.match(/\"lsd\":\"([^\"]+)\"/) ||
    html.match(/name=\"lsd\" value=\"([^\"]+)\"/);
  const lsd = lsdMatch?.[1] || '';

  // Extract access token
  const tokenMatch = html.match(/\"accessToken\":\"([^\"]+)\"/);
  const accessToken = tokenMatch?.[1];

  // A login form in the HTML means the cookies no longer authenticate.
  if (html.includes('login_form') || html.includes('login_page')) {
    throw new Error("Meta AI: Cookies expired. Please update in Settings.");
  }

  // Send chat message
  const variables = {
    message: {
      text: message,
      content_type: "TEXT"
    },
    source: "PDT_CHAT_INPUT",
    // Random client-generated id so the server can distinguish retries.
    external_message_id: Math.random().toString(36).substring(2) + Date.now().toString(36)
  };

  const body = new URLSearchParams({
    fb_api_caller_class: "RelayModern",
    fb_api_req_friendly_name: "useAbraSendMessageMutation",
    variables: JSON.stringify(variables),
    doc_id: "7783822248314888",
    ...(lsd && { lsd }),
  });

  const response = await fetch(GRAPHQL_ENDPOINT, {
    method: "POST",
    headers: {
      "Content-Type": "application/x-www-form-urlencoded",
      "Cookie": normalizedCookies,
      "Origin": META_AI_BASE,
      "Referer": `${META_AI_BASE}/`,
      "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
      "Sec-Fetch-Site": "same-origin",
      "Sec-Fetch-Mode": "cors",
      "Sec-Fetch-Dest": "empty",
      "Accept": "application/json",
      "Accept-Language": "en-US,en;q=0.9",
      ...(accessToken && { "Authorization": `OAuth ${accessToken}` })
    },
    body: body.toString()
  });

  // Get response text first so HTML error pages can be detected before
  // attempting a JSON parse.
  const responseText = await response.text();

  // Check if response is HTML (error page)
  if (responseText.trim().startsWith('<') || responseText.includes('<!DOCTYPE')) {
    console.error('[Meta Chat] Received HTML instead of JSON:', responseText.substring(0, 200));
    if (responseText.includes('login') || responseText.includes('checkpoint')) {
      throw new Error("Meta AI: Cookies expired or account requires verification. Please update cookies in Settings.");
    }
    throw new Error("Meta AI: Service returned an error page. Please try again later.");
  }

  if (!response.ok) {
    console.error('[Meta Chat] Response not OK:', response.status, responseText.substring(0, 200));
    throw new Error(`Meta AI Error: ${response.status}`);
  }

  // Parse JSON response
  let data;
  try {
    data = JSON.parse(responseText);
  } catch (e) {
    console.error('[Meta Chat] Failed to parse JSON:', responseText.substring(0, 200));
    throw new Error("Meta AI: Invalid response format. Please try again.");
  }

  // Extract text response from the GraphQL response.
  // NOTE(review): two response shapes are tried — confirm which one the
  // current doc_id actually returns.
  const messageData = data?.data?.node?.bot_response_message ||
    data?.data?.xabraAIPreviewMessageSendMutation?.message;

  // Check for GraphQL errors
  if (data?.errors) {
    console.error('[Meta Chat] GraphQL errors:', JSON.stringify(data.errors));
    throw new Error("Meta AI: Request failed. " + (data.errors[0]?.message || "Unknown error"));
  }

  const textResponse = messageData?.text ||
    messageData?.snippet ||
    messageData?.message_text ||
    "I'm sorry, I couldn't generate a response.";

  return textResponse;
}
/**
* Normalize cookies from JSON array format to header string format
* Handles: [{name: "foo", value: "bar"}, ...] -> "foo=bar; ..."
*/
/**
 * Normalize cookies into the "name=value; name=value" header format.
 *
 * Accepts three input shapes:
 *  - a JSON array export: [{name, value}, ...]
 *  - a JSON object of merged cookies: {name: value, ...}
 *  - an already-formatted header string (returned unchanged)
 */
function normalizeCookies(cookies: string): string {
  if (!cookies) return '';

  const candidate = cookies.trim();
  try {
    if (candidate.startsWith('[')) {
      const list = JSON.parse(candidate);
      if (Array.isArray(list)) {
        const pairs: string[] = [];
        for (const item of list) {
          pairs.push(`${item.name}=${item.value}`);
        }
        return pairs.join('; ');
      }
    }

    if (candidate.startsWith('{')) {
      const record = JSON.parse(candidate);
      const pairs: string[] = [];
      for (const [key, val] of Object.entries(record)) {
        pairs.push(`${key}=${val}`);
      }
      return pairs.join('; ');
    }
  } catch {
    // Not valid JSON — assume it is already a cookie header string.
  }

  return cookies;
}

View file

@ -1,130 +0,0 @@
import { NextRequest, NextResponse } from 'next/server';
import { MetaCrawlClient } from '@/lib/providers/meta-crawl-client';
/**
* API Route: /api/meta-crawl
*
* Proxies image generation requests to the Crawl4AI Python service
* which uses browser automation to interact with Meta AI.
*/
// Shared client for the Crawl4AI service, created once at module load so
// every request handler in this route reuses it.
const client = new MetaCrawlClient();

/**
 * POST /api/meta-crawl
 *
 * Generate images through the Crawl4AI browser-automation service.
 * Body: { prompt, cookies, numImages | num_images, async }.
 *  - async=true: returns { task_id } immediately for polling via GET.
 *  - otherwise: blocks until generation completes and returns the images.
 */
export async function POST(req: NextRequest) {
  try {
    const body = await req.json();
    // Support both numImages (camelCase) and num_images (snake_case)
    const { prompt, cookies, numImages, num_images, async = false } = body;
    const imageCount = num_images || numImages || 4;

    if (!prompt) {
      return NextResponse.json(
        { error: "Prompt is required" },
        { status: 400 }
      );
    }
    if (!cookies) {
      return NextResponse.json(
        { error: "Meta AI cookies are required. Please configure in settings." },
        { status: 401 }
      );
    }

    // Check if service is healthy before queueing any work.
    const isHealthy = await client.healthCheck();
    if (!isHealthy) {
      return NextResponse.json(
        { error: "Crawl4AI service is not available. Please try again later." },
        { status: 503 }
      );
    }

    if (async) {
      // Async mode: return task_id for polling
      const taskId = await client.generateAsync(prompt, cookies, imageCount);
      return NextResponse.json({
        success: true,
        task_id: taskId
      });
    }

    // Sync mode: wait for completion
    console.log(`[MetaCrawl API] Generating images for: "${prompt.substring(0, 50)}..."`);
    const images = await client.generate(prompt, cookies, imageCount);

    return NextResponse.json({
      success: true,
      images: images.map(img => ({
        url: img.url,
        data: img.data,
        prompt: img.prompt,
        model: img.model
      }))
    });
  } catch (error: any) {
    console.error("[MetaCrawl API] Error:", error);
    return NextResponse.json(
      { error: error.message || "Image generation failed" },
      { status: 500 }
    );
  }
}
/**
* GET /api/meta-crawl?task_id=xxx
*
* Get status of an async generation task
*/
/**
 * GET /api/meta-crawl?task_id=xxx
 *
 * With a task_id: returns the status of that async generation task.
 * Without one: returns the service's current rate-limit status instead.
 */
export async function GET(req: NextRequest) {
  const taskId = req.nextUrl.searchParams.get('task_id');

  if (!taskId) {
    // No task_id provided: report rate-limit status as a fallback.
    try {
      const status = await client.getRateLimitStatus();
      return NextResponse.json(status);
    } catch {
      return NextResponse.json({ error: "Service not available" }, { status: 503 });
    }
  }

  try {
    const status = await client.getTaskStatus(taskId);
    return NextResponse.json(status);
  } catch (error: unknown) {
    const message = error instanceof Error ? error.message : 'Unknown error';
    return NextResponse.json(
      { error: message },
      { status: message === 'Task not found' ? 404 : 500 }
    );
  }
}
/**
* DELETE /api/meta-crawl?task_id=xxx
*
* Clean up a completed task
*/
/**
 * DELETE /api/meta-crawl?task_id=xxx
 *
 * Clean up a completed task on the Crawl4AI service.
 */
export async function DELETE(req: NextRequest) {
  const taskId = req.nextUrl.searchParams.get('task_id');
  if (!taskId) {
    return NextResponse.json({ error: "task_id is required" }, { status: 400 });
  }

  try {
    // NOTE(review): deletion goes straight to the service's /status/{id}
    // endpoint rather than through MetaCrawlClient — confirm this is the
    // intended service API.
    const response = await fetch(`${process.env.CRAWL4AI_URL || 'http://localhost:8000'}/status/${taskId}`, {
      method: 'DELETE'
    });
    if (!response.ok) {
      return NextResponse.json({ error: "Failed to delete task" }, { status: response.status });
    }
    return NextResponse.json({ deleted: true });
  } catch (error: unknown) {
    const message = error instanceof Error ? error.message : 'Unknown error';
    return NextResponse.json({ error: message }, { status: 500 });
  }
}

View file

@ -3,23 +3,39 @@ import { MetaAIClient } from '@/lib/providers/meta-client';
export async function POST(req: NextRequest) {
try {
const { prompt, cookies, imageCount = 4 } = await req.json();
const { prompt, cookies, imageCount = 4, aspectRatio = 'portrait', useMetaFreeWrapper, metaFreeWrapperUrl } = await req.json();
if (!prompt) {
return NextResponse.json({ error: "Prompt is required" }, { status: 400 });
}
if (!cookies) {
// Only check for cookies if NOT using free wrapper
if (!useMetaFreeWrapper && !cookies) {
return NextResponse.json(
{ error: "Meta AI cookies required. Configure in Settings." },
{ error: "Meta AI cookies required. Configure in Settings or use Free Wrapper." },
{ status: 401 }
);
}
console.log(`[Meta AI Route] Generating images for: "${prompt.substring(0, 30)}..."`);
console.log(`[Meta AI Route] Generating images for: "${prompt.substring(0, 30)}..." (${aspectRatio})`);
const client = new MetaAIClient({ cookies });
const results = await client.generate(prompt, imageCount);
// Diagnostic: Check how many cookies we received
try {
const parsed = typeof cookies === 'string' && cookies.trim().startsWith('[')
? JSON.parse(cookies)
: cookies;
const count = Array.isArray(parsed) ? parsed.length : (typeof cookies === 'string' ? cookies.split(';').length : 0);
console.log(`[Meta AI Route] Received ${count} cookies (Free Wrapper: ${useMetaFreeWrapper})`);
} catch {
console.log(`[Meta AI Route] Cookie format: ${typeof cookies}`);
}
const client = new MetaAIClient({
cookies: cookies || '',
useFreeWrapper: useMetaFreeWrapper,
freeWrapperUrl: metaFreeWrapperUrl
});
const results = await client.generate(prompt, imageCount, aspectRatio);
// Download images as base64 for storage
const images = await Promise.all(
@ -48,7 +64,7 @@ export async function POST(req: NextRequest) {
throw new Error("No valid images generated");
}
return NextResponse.json({ images: validImages });
return NextResponse.json({ success: true, images: validImages });
} catch (error: any) {
console.error("[Meta AI Route] Error:", error);

View file

@ -1,59 +1,66 @@
import { NextRequest, NextResponse } from 'next/server';
import { MetaCrawlClient } from '@/lib/providers/meta-crawl-client';
import { MetaAIClient } from '@/lib/providers/meta-client';
/**
* POST /api/meta/video
*
* Generate a video from a text prompt (and optionally an image) using Meta AI.
* - Text-to-Video: Just provide prompt and cookies
* - Image-to-Video: Also provide imageBase64
* Video generation takes 30-60+ seconds, so this endpoint may take a while.
* Generate a video from a text prompt using Meta AI's Kadabra engine.
* Uses MetaAIClient for session initialization (which works for images).
*/
const META_AI_BASE = "https://www.meta.ai";
const GRAPHQL_ENDPOINT = `${META_AI_BASE}/api/graphql/`;
// Video generation doc IDs from metaai-api
const VIDEO_INITIATE_DOC_ID = "25290947477183545"; // useKadabraSendMessageMutation
const VIDEO_POLL_DOC_ID = "25290569913909283"; // KadabraPromptRootQuery
export async function POST(req: NextRequest) {
try {
const { prompt, cookies: clientCookies, imageBase64 } = await req.json();
const { prompt, cookies: clientCookies, aspectRatio = 'portrait' } = await req.json();
if (!prompt) {
return NextResponse.json({ error: "Prompt is required" }, { status: 400 });
}
// Get cookies from request body or cookie header
let cookieString = clientCookies || req.cookies.get('meta_cookies')?.value;
if (!cookieString) {
if (!clientCookies) {
return NextResponse.json(
{ error: "Meta AI cookies not found. Please configure settings." },
{ status: 401 }
);
}
const mode = imageBase64 ? 'image-to-video' : 'text-to-video';
console.log(`[Meta Video API] Starting ${mode} for prompt: "${prompt.substring(0, 50)}..."`);
console.log(`[Meta Video API] Generating video for: "${prompt.substring(0, 50)}..." (${aspectRatio})`);
const client = new MetaCrawlClient();
// Use MetaAIClient for session initialization (proven to work)
const client = new MetaAIClient({ cookies: clientCookies });
const session = await client.getSession();
const cookieString = client.getCookies();
// Check if crawl4ai service is available
const isHealthy = await client.healthCheck();
if (!isHealthy) {
return NextResponse.json(
{ error: "Meta AI video service is not available. Make sure crawl4ai is running." },
{ status: 503 }
);
console.log("[Meta Video] Using MetaAIClient session:", {
hasLsd: !!session.lsd,
hasDtsg: !!session.fb_dtsg,
hasAccessToken: !!session.accessToken
});
// Generate unique IDs for this request
const externalConversationId = crypto.randomUUID();
const offlineThreadingId = Date.now().toString() + Math.random().toString().substring(2, 8);
// Initiate video generation with aspect ratio
await initiateVideoGeneration(prompt, externalConversationId, offlineThreadingId, session, cookieString, aspectRatio);
// Poll for video completion
const videos = await pollForVideoResult(externalConversationId, session, cookieString);
if (videos.length === 0) {
throw new Error("No videos generated");
}
// Generate video - this can take 30-60+ seconds
const result = await client.generateVideo(prompt, cookieString, imageBase64);
if (!result.success || result.videos.length === 0) {
throw new Error(result.error || "No videos generated");
}
console.log(`[Meta Video API] Successfully generated ${result.videos.length} video(s)`);
return NextResponse.json({
success: true,
videos: result.videos,
conversation_id: result.conversation_id
videos: videos.map(v => ({ url: v.url, prompt: prompt })),
conversation_id: externalConversationId
});
} catch (error: unknown) {
@ -62,7 +69,7 @@ export async function POST(req: NextRequest) {
const msg = err.message || "";
const isAuthError = msg.includes("401") || msg.includes("403") ||
msg.includes("auth") || msg.includes("cookies") || msg.includes("expired");
msg.includes("auth") || msg.includes("cookies") || msg.includes("expired") || msg.includes("Login");
return NextResponse.json(
{ error: err.message || "Video generation failed" },
@ -70,3 +77,295 @@ export async function POST(req: NextRequest) {
);
}
}
/**
 * Session tokens scraped from the meta.ai HTML page. All fields are
 * optional because extraction is best-effort: different page builds
 * expose different tokens.
 */
interface MetaSession {
  lsd?: string;          // LSD anti-CSRF token sent with GraphQL requests
  fb_dtsg?: string;      // DTSG token used for authenticated mutations
  accessToken?: string;  // OAuth bearer token, when present in the page
}
/**
* Normalize cookies from JSON array to string format
*/
/**
 * Normalize cookies from a JSON-array export ([{name, value}, ...]) into
 * the "name=value; name=value" header string format. Any input that is
 * not a JSON array is returned unchanged.
 */
function normalizeCookies(cookies: string): string {
  if (!cookies) return '';

  const candidate = cookies.trim();
  try {
    if (candidate.startsWith('[')) {
      const entries = JSON.parse(candidate);
      if (Array.isArray(entries)) {
        const pairs: string[] = [];
        for (const entry of entries) {
          pairs.push(`${entry.name}=${entry.value}`);
        }
        return pairs.join('; ');
      }
    }
  } catch {
    // Not valid JSON — assume it is already a cookie header string.
  }

  return cookies;
}
/**
* Initialize session - get access token and LSD from meta.ai page
*/
/**
 * Initialize session: scrape the meta.ai landing page for the LSD, DTSG
 * and access tokens needed to authenticate GraphQL calls.
 *
 * Retries up to 3 times with a linear backoff when Facebook serves its
 * generic error page (observed under rate limiting).
 *
 * @param cookies    Cookie header string for meta.ai.
 * @param retryCount Internal recursion counter; external callers pass 0.
 * @throws Error when cookies are expired or the error page persists.
 */
async function initSession(cookies: string, retryCount: number = 0): Promise<MetaSession> {
  console.log("[Meta Video] Initializing session...");
  console.log("[Meta Video] Cookie string length:", cookies.length);

  // Add small delay to avoid rate limiting (especially after image generation)
  if (retryCount > 0) {
    await new Promise(resolve => setTimeout(resolve, 1000 * retryCount));
  }

  const response = await fetch(META_AI_BASE, {
    headers: {
      "Cookie": cookies,
      "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
      "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
      "Accept-Language": "en-US,en;q=0.5",
    }
  });

  const html = await response.text();
  console.log("[Meta Video] HTML Preview:", html.substring(0, 200));

  // Detect Facebook error page and retry
  if (html.includes('<title>Error</title>') || html.includes('id="facebook"')) {
    console.warn("[Meta Video] Received Facebook error page, retrying...");
    if (retryCount < 3) {
      return initSession(cookies, retryCount + 1);
    }
    throw new Error("Meta AI: Server temporarily unavailable. Please try again in a moment.");
  }

  const session: MetaSession = {};

  // Extract LSD token - multiple patterns for different Meta AI versions
  const lsdMatch = html.match(/"LSD",\[\],\{"token":"([^"]+)"/) ||
    html.match(/"lsd":"([^"]+)"/) ||
    html.match(/name="lsd" value="([^"]+)"/) ||
    html.match(/"token":"([^"]+)".*?"name":"lsd"/);
  if (lsdMatch) {
    session.lsd = lsdMatch[1];
  }

  // Extract access token
  const tokenMatch = html.match(/"accessToken":"([^"]+)"/) ||
    html.match(/accessToken['"]\s*:\s*['"]([^'"]+)['"]/);
  if (tokenMatch) {
    session.accessToken = tokenMatch[1];
  }

  // Extract DTSG token - try multiple patterns
  const dtsgMatch = html.match(/DTSGInitData",\[\],\{"token":"([^"]+)"/) ||
    html.match(/"DTSGInitialData".*?"token":"([^"]+)"/) ||
    html.match(/fb_dtsg['"]\s*:\s*\{[^}]*['"]token['"]\s*:\s*['"]([^'"]+)['"]/);
  if (dtsgMatch) {
    session.fb_dtsg = dtsgMatch[1];
  }

  // Also try to extract from cookies if not found in HTML.
  // NOTE(review): these patterns would also match longer cookie names that
  // merely end in "lsd"/"fb_dtsg" — confirm actual cookie naming.
  if (!session.lsd) {
    const lsdCookie = cookies.match(/lsd=([^;]+)/);
    if (lsdCookie) session.lsd = lsdCookie[1];
  }
  if (!session.fb_dtsg) {
    const dtsgCookie = cookies.match(/fb_dtsg=([^;]+)/);
    if (dtsgCookie) session.fb_dtsg = dtsgCookie[1];
  }

  // A login form in the HTML means the cookies no longer authenticate.
  if (html.includes('login_form') || html.includes('login_page')) {
    throw new Error("Meta AI: Cookies expired. Please update in Settings.");
  }

  console.log("[Meta Video] Session tokens extracted:", {
    hasLsd: !!session.lsd,
    hasDtsg: !!session.fb_dtsg,
    hasAccessToken: !!session.accessToken
  });

  return session;
}
/**
* Initiate video generation using Kadabra mutation
*/
/**
 * Kick off a Kadabra video-generation job via the GraphQL mutation.
 *
 * Resolves once Meta AI has accepted the request; the finished video is
 * retrieved separately by polling. Throws on HTTP-level or GraphQL-level
 * errors.
 */
async function initiateVideoGeneration(
  prompt: string,
  conversationId: string,
  threadingId: string,
  session: MetaSession,
  cookies: string,
  aspectRatio: string = 'portrait'
): Promise<void> {
  console.log("[Meta Video] Initiating video generation...");

  // Translate the UI aspect-ratio name into Meta's orientation enum;
  // anything unrecognized falls back to vertical (portrait).
  const orientation =
    aspectRatio === 'landscape' ? 'HORIZONTAL' :
    aspectRatio === 'square' ? 'SQUARE' :
    'VERTICAL';

  const mutationVariables = {
    message: {
      prompt_text: prompt,
      external_conversation_id: conversationId,
      offline_threading_id: threadingId,
      imagineClientOptions: { orientation: orientation },
      selectedAgentType: "PLANNER"
    },
    __relay_internal__pv__AbraArtifactsEnabledrelayprovider: true
  };

  const form = new URLSearchParams({
    fb_api_caller_class: "RelayModern",
    fb_api_req_friendly_name: "useKadabraSendMessageMutation",
    variables: JSON.stringify(mutationVariables),
    doc_id: VIDEO_INITIATE_DOC_ID,
  });
  if (session.lsd) form.set('lsd', session.lsd);
  if (session.fb_dtsg) form.set('fb_dtsg', session.fb_dtsg);

  const requestHeaders: Record<string, string> = {
    "Content-Type": "application/x-www-form-urlencoded",
    "Cookie": cookies,
    "Origin": META_AI_BASE,
    "Referer": `${META_AI_BASE}/`,
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
  };
  if (session.accessToken) {
    requestHeaders["Authorization"] = `OAuth ${session.accessToken}`;
  }

  const res = await fetch(GRAPHQL_ENDPOINT, {
    method: "POST",
    headers: requestHeaders,
    body: form.toString()
  });

  if (!res.ok) {
    const detail = await res.text();
    console.error("[Meta Video] Initiation failed:", res.status, detail);
    throw new Error(`Meta AI Error: ${res.status}`);
  }

  const payload = await res.json();
  console.log("[Meta Video] Initiation response:", JSON.stringify(payload).substring(0, 200));

  // Surface GraphQL-level failures even on HTTP 200.
  if (payload.errors) {
    throw new Error(payload.errors[0]?.message || "Video initiation failed");
  }
}
/**
* Poll for video result using KadabraPromptRootQuery
*/
/**
 * Poll for video result using KadabraPromptRootQuery.
 *
 * Polls every 2 seconds for up to ~2 minutes. Returns as soon as at
 * least one video URL appears in the conversation; throws on a
 * FAILED/ERROR status, on an error during the final attempt, or on
 * timeout.
 */
async function pollForVideoResult(
  conversationId: string,
  session: MetaSession,
  cookies: string
): Promise<{ url: string }[]> {
  const maxAttempts = 60; // 2 minutes max
  const pollInterval = 2000; // ms between polls

  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    console.log(`[Meta Video] Polling attempt ${attempt + 1}/${maxAttempts}...`);
    // Wait before each poll — the video is never ready immediately.
    await new Promise(resolve => setTimeout(resolve, pollInterval));

    const variables = {
      external_conversation_id: conversationId
    };

    const body = new URLSearchParams({
      fb_api_caller_class: "RelayModern",
      fb_api_req_friendly_name: "KadabraPromptRootQuery",
      variables: JSON.stringify(variables),
      doc_id: VIDEO_POLL_DOC_ID,
      ...(session.lsd && { lsd: session.lsd }),
      ...(session.fb_dtsg && { fb_dtsg: session.fb_dtsg })
    });

    try {
      const response = await fetch(GRAPHQL_ENDPOINT, {
        method: "POST",
        headers: {
          "Content-Type": "application/x-www-form-urlencoded",
          "Cookie": cookies,
          "Origin": META_AI_BASE,
          "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
          ...(session.accessToken && { "Authorization": `OAuth ${session.accessToken}` })
        },
        body: body.toString()
      });

      const data = await response.json();

      // Extract video URLs from response
      const videos = extractVideosFromResponse(data);
      if (videos.length > 0) {
        console.log(`[Meta Video] Got ${videos.length} video(s)!`);
        return videos;
      }

      // Check for error status.
      // NOTE(review): status path assumed from observed responses — verify.
      const status = data?.data?.kadabra_prompt?.status;
      if (status === "FAILED" || status === "ERROR") {
        throw new Error("Meta AI video generation failed");
      }
    } catch (e: any) {
      // Transient poll errors are tolerated until the final attempt.
      console.error("[Meta Video] Poll error:", e.message);
      if (attempt === maxAttempts - 1) throw e;
    }
  }

  throw new Error("Meta AI: Video generation timed out");
}
/**
* Extract video URLs from GraphQL response
*/
/**
 * Walk a KadabraPromptRootQuery GraphQL response and collect every video
 * URL found in message attachments or imagine-card media sets.
 *
 * Never throws: unexpected shapes are logged and whatever was collected
 * so far is returned (possibly an empty list).
 */
function extractVideosFromResponse(response: any): { url: string }[] {
  const collected: { url: string }[] = [];
  const addUrl = (url?: string) => {
    if (url) collected.push({ url });
  };

  try {
    const edges = response?.data?.kadabra_prompt?.messages?.edges || [];
    for (const edge of edges) {
      const messageNode = edge?.node;

      // Video media can appear directly on message attachments,
      // under several alternative URL fields.
      for (const attachment of messageNode?.attachments || []) {
        const media = attachment?.media;
        addUrl(media?.video_uri);
        addUrl(media?.playable_url);
        addUrl(media?.browser_native_sd_url);
      }

      // ...or inside the imagine_card's media sets.
      const mediaSets = messageNode?.imagine_card?.session?.media_sets;
      if (mediaSets) {
        for (const mediaSet of mediaSets) {
          for (const item of mediaSet?.imagine_media || []) {
            addUrl(item?.video_uri);
          }
        }
      }
    }
  } catch (e) {
    console.error("[Meta Video] Error extracting videos:", e);
  }

  return collected;
}

View file

@ -52,8 +52,8 @@ export default function Home() {
</div>
</main>
{/* Floating Chat */}
{/* <GrokChat /> */}
{/* Floating AI Chat */}
<GrokChat />
<CookieExpiredDialog />
</div>
);

View file

@ -7,7 +7,7 @@ import { cn } from '@/lib/utils';
interface EditPromptModalProps {
isOpen: boolean;
onClose: () => void;
image: { data: string; prompt: string } | null;
image: { data: string; prompt: string; provider?: string } | null;
onGenerate: (prompt: string, options: { keepSubject: boolean; keepScene: boolean; keepStyle: boolean }) => Promise<void>;
}
@ -18,12 +18,16 @@ export function EditPromptModal({ isOpen, onClose, image, onGenerate }: EditProm
const [keepScene, setKeepScene] = React.useState(true);
const [keepStyle, setKeepStyle] = React.useState(true);
const isMeta = image?.provider === 'meta';
React.useEffect(() => {
if (isOpen && image) {
setPrompt(image.prompt);
}
}, [isOpen, image]);
// ... (lines 27-130 remain unchanged, so we skip them in replace tool if possible,
if (!isOpen) return null;
const handleSubmit = async (e: React.FormEvent) => {
@ -122,13 +126,14 @@ export function EditPromptModal({ isOpen, onClose, image, onGenerate }: EditProm
value={prompt}
onChange={(e) => setPrompt(e.target.value)}
className="w-full h-24 p-3 rounded-xl bg-white/5 border border-white/10 resize-none focus:ring-2 focus:ring-amber-500/50 focus:border-amber-500/50 outline-none text-sm text-white placeholder:text-white/30 transition-all"
placeholder="Describe your remix... The selected consistency options will be preserved..."
placeholder={isMeta ? "Modify your prompt to generate a new variation..." : "Describe your remix... The selected consistency options will be preserved..."}
autoFocus
/>
</div>
</div>
{/* Consistency Toggles */}
{!isMeta && (
<div className="mt-4">
<label className="text-xs font-medium text-white/50 mb-2 block">Keep Consistent:</label>
<div className="flex flex-wrap gap-2">
@ -152,13 +157,16 @@ export function EditPromptModal({ isOpen, onClose, image, onGenerate }: EditProm
/>
</div>
</div>
)}
{/* Info about consistency */}
{!isMeta && (
<div className="mt-4 p-3 bg-white/5 rounded-xl border border-white/10">
<p className="text-xs text-white/50">
<span className="text-amber-400">💡</span> Locked elements will be used as references to maintain visual consistency across generations.
</p>
</div>
)}
{/* Actions */}
<div className="flex justify-end gap-3 mt-6">

View file

@ -11,23 +11,47 @@ import { EditPromptModal } from './EditPromptModal';
// Helper function to get proper image src (handles URLs vs base64)
const getImageSrc = (data: string): string => {
if (!data) return '';
const cleanData = data.trim();
// If it's already a URL, use it directly
if (data.startsWith('http://') || data.startsWith('https://') || data.startsWith('data:')) {
return data;
if (cleanData.indexOf('http') === 0 || cleanData.indexOf('data:') === 0) {
return cleanData;
}
// Otherwise, treat as base64
return `data:image/png;base64,${data}`;
// Otherwise, treat as base64 (don't warn - base64 often contains 'http' as random characters)
return `data:image/png;base64,${cleanData}`;
};
export function Gallery() {
const { gallery, clearGallery, removeFromGallery, setPrompt, addVideo, addToGallery, settings, videos, removeVideo, isGenerating } = useStore();
const [selectedIndex, setSelectedIndex] = React.useState<number | null>(null);
const {
gallery, loadGallery, addToGallery, removeFromGallery, clearGallery,
isGenerating,
settings,
videos, addVideo,
setPrompt
} = useStore();
const [videoModalOpen, setVideoModalOpen] = React.useState(false);
const [videoSource, setVideoSource] = React.useState<{ data: string, prompt: string } | null>(null);
const [videoSource, setVideoSource] = React.useState<{ data: string, prompt: string, provider?: string } | null>(null);
const [editModalOpen, setEditModalOpen] = React.useState(false);
const [editSource, setEditSource] = React.useState<{ data: string, prompt: string } | null>(null);
const [editPromptValue, setEditPromptValue] = React.useState('');
const [videoPromptValue, setVideoPromptValue] = React.useState('');
const [useSourceImage, setUseSourceImage] = React.useState(true);
const [selectedIndex, setSelectedIndex] = React.useState<number | null>(null);
const openVideoModal = (img: { data: string, prompt: string }) => {
React.useEffect(() => {
if (selectedIndex !== null && gallery[selectedIndex]) {
setEditSource(gallery[selectedIndex]);
setEditPromptValue(gallery[selectedIndex].prompt || '');
setVideoPromptValue('');
setUseSourceImage(true);
}
}, [selectedIndex, gallery]);
React.useEffect(() => {
loadGallery();
}, []); // Only load on mount
const openVideoModal = (img: { data: string, prompt: string, provider?: string }) => {
setVideoSource(img);
setVideoModalOpen(true);
};
@ -37,27 +61,52 @@ export function Gallery() {
setEditModalOpen(true);
};
const [isGeneratingMetaVideo, setIsGeneratingMetaVideo] = React.useState(false);
const [isGeneratingMetaVideo, setIsGeneratingMetaVideo] = React.useState(false); // Kept for UI state compatibility
const [isGeneratingWhiskVideo, setIsGeneratingWhiskVideo] = React.useState(false);
// Handle Meta AI video generation (image-to-video)
const handleGenerateMetaVideo = async (img: { data: string; prompt: string }) => {
if (!settings.metaCookies) {
alert("Please set your Meta AI Cookies in Settings first!");
// Handle Meta AI video generation (text-to-video via Kadabra)
const handleGenerateMetaVideo = async (img: { data: string; prompt: string }, customPrompt?: string) => {
if (!settings.metaCookies && !settings.facebookCookies) {
alert("Please set your Meta AI (or Facebook) Cookies in Settings first!");
return;
}
setIsGeneratingMetaVideo(true);
try {
console.log("[Gallery] Starting Meta AI image-to-video...");
console.log("[Gallery] Starting Meta AI video generation...");
// Create a descriptive prompt that includes the original image context + animation
const originalPrompt = img.prompt || "";
const animationDescription = customPrompt || "natural movement";
// Combine original image description with animation instruction
const promptText = originalPrompt
? `Create a video of: ${originalPrompt}. Animation: ${animationDescription}`
: `Create an animated video with: ${animationDescription}`;
console.log("[Gallery] Meta video prompt:", promptText);
// Merge cookies safely
let mergedCookies = settings.metaCookies;
try {
const safeParse = (str: string) => {
if (!str || str === "undefined" || str === "null") return [];
try { return JSON.parse(str); } catch { return []; }
};
const m = safeParse(settings.metaCookies);
const f = safeParse(settings.facebookCookies);
if (Array.isArray(m) || Array.isArray(f)) {
mergedCookies = [...(Array.isArray(m) ? m : []), ...(Array.isArray(f) ? f : [])] as any;
}
} catch (e) { console.error("Cookie merge failed", e); }
const res = await fetch('/api/meta/video', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
prompt: img.prompt || "Animate this image with natural movement",
cookies: settings.metaCookies,
imageBase64: img.data
prompt: promptText,
cookies: typeof mergedCookies === 'string' ? mergedCookies : JSON.stringify(mergedCookies)
})
});
@ -69,19 +118,20 @@ export function Gallery() {
addVideo({
id: crypto.randomUUID(),
url: video.url,
prompt: video.prompt || img.prompt,
prompt: promptText,
thumbnail: img.data,
createdAt: Date.now()
});
}
alert('🎬 Video generation complete! Scroll up to see your video.');
setVideoModalOpen(false);
} else {
throw new Error(data.error || 'No videos generated');
}
} catch (error: any) {
console.error("[Gallery] Meta video error:", error);
let errorMessage = error.message || 'Video generation failed';
if (errorMessage.includes('401') || errorMessage.includes('cookies')) {
if (errorMessage.includes('401') || errorMessage.includes('cookies') || errorMessage.includes('expired')) {
errorMessage = '🔐 Your Meta AI cookies have expired. Please go to Settings and update them.';
}
alert(errorMessage);
@ -90,21 +140,30 @@ export function Gallery() {
}
};
const handleGenerateVideo = async (prompt: string) => {
if (!videoSource) return;
const handleGenerateVideo = async (prompt: string, sourceOverride?: { data: string; prompt: string; provider?: string; aspectRatio?: string }) => {
const activeSource = sourceOverride || videoSource;
if (!activeSource) return;
// Route to Meta AI video for meta provider
if (activeSource.provider === 'meta') {
await handleGenerateMetaVideo(activeSource, prompt);
return;
}
if (!settings.whiskCookies) {
alert("Please set your Whisk Cookies in Settings first!");
throw new Error("Missing Whisk cookies");
}
setIsGeneratingWhiskVideo(true);
try {
const res = await fetch('/api/video/generate', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
prompt: prompt,
imageBase64: videoSource.data,
// imageGenerationId: (videoSource as any).id, // REMOVE: "id" is a local DB ID (e.g. 1), not a Whisk Media ID.
imageBase64: activeSource.data,
cookies: settings.whiskCookies
})
});
@ -117,34 +176,101 @@ export function Gallery() {
id: data.id,
url: data.url,
prompt: prompt,
thumbnail: videoSource.data, // Use source image as thumb
thumbnail: activeSource.data,
createdAt: Date.now()
});
// Success notification
setTimeout(() => {
alert('🎬 Video generation complete!\n\nYour video has been saved. Go to the "Uploads" page and select the "Videos" tab to view it.');
}, 100);
} else {
console.error(data.error);
// Show user-friendly error messages for Google safety policies
let errorMessage = data.error;
if (data.error?.includes('NCII')) {
errorMessage = '🚫 Content Policy: Video blocked by Google\'s NCII (Non-Consensual Intimate Imagery) protection. Please try with a different source image.';
errorMessage = '🚫 Content Policy: Video blocked by Google\'s NCII protection. Please try with a different source image.';
} else if (data.error?.includes('PROMINENT_PEOPLE') || data.error?.includes('prominent')) {
errorMessage = '🚫 Content Policy: Video blocked because the image contains a recognizable person. Try using a different image.';
errorMessage = '🚫 Content Policy: Video blocked because the image contains a recognizable person.';
} else if (data.error?.includes('safety') || data.error?.includes('SAFETY')) {
errorMessage = '⚠️ Content Policy: Video blocked by Google\'s safety filters. Try a different source image.';
errorMessage = '⚠️ Content Policy: Video blocked by Google\'s safety filters.';
} else if (data.error?.includes('401') || data.error?.includes('UNAUTHENTICATED')) {
errorMessage = '🔐 Authentication Error: Your Whisk (Google) cookies have expired. Please go to Settings and update them.';
errorMessage = '🔐 Authentication Error: Your Whisk cookies have expired. Please update in Settings.';
} else if (data.error?.includes('429') || data.error?.includes('RESOURCE_EXHAUSTED')) {
errorMessage = '⏱️ Rate Limit: Too many requests. Please wait a few minutes and try again.';
}
alert(errorMessage);
throw new Error(data.error);
}
} finally {
setIsGeneratingWhiskVideo(false);
}
};
const handleRemix = async (prompt: string, options: { keepSubject: boolean; keepScene: boolean; keepStyle: boolean }) => {
if (!editSource) return;
// Meta AI Remix Flow (Prompt Edit Only)
if (editSource.provider === 'meta') {
if (!settings.metaCookies && !settings.facebookCookies) {
alert("Please set your Meta AI (or Facebook) Cookies in Settings first!");
return;
}
try {
// Merge cookies safely
let mergedCookies = settings.metaCookies;
try {
const safeParse = (str: string) => {
if (!str || str === "undefined" || str === "null") return [];
try { return JSON.parse(str); } catch { return []; }
};
const m = safeParse(settings.metaCookies);
const f = safeParse(settings.facebookCookies);
if (Array.isArray(m) || Array.isArray(f)) {
mergedCookies = [...(Array.isArray(m) ? m : []), ...(Array.isArray(f) ? f : [])] as any;
}
} catch (e) { console.error("Cookie merge failed", e); }
const res = await fetch('/api/meta/generate', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
prompt: prompt,
cookies: typeof mergedCookies === 'string' ? mergedCookies : JSON.stringify(mergedCookies),
imageCount: 4
})
});
const data = await res.json();
if (data.error) throw new Error(data.error);
if (data.success && data.images?.length > 0) {
// Add new images to gallery
const newImages = data.images.map((img: any) => ({
id: crypto.randomUUID(),
data: img.data, // Base64
prompt: prompt,
createdAt: Date.now(),
width: 1024,
height: 1024,
aspectRatio: settings.aspectRatio,
provider: 'meta'
}));
// Add to store
newImages.forEach(addGeneratedImage);
alert('✨ Remix complete! New images added to gallery.');
setEditModalOpen(false);
} else {
throw new Error('No images generated');
}
} catch (e: any) {
console.error("Meta Remix failed", e);
alert("Remix failed: " + e.message);
}
return;
}
// Whisk Remix Flow (Reference Injection)
if (!settings.whiskCookies) {
alert("Please set your Whisk Cookies in Settings first!");
throw new Error("Missing Whisk cookies");
@ -220,9 +346,41 @@ export function Gallery() {
return null; // Or return generic empty state if controlled by parent, but parent checks length usually
}
const handleClearAll = () => {
if (window.confirm("Delete all " + gallery.length + " images?")) {
const handleClearAll = async () => {
const count = gallery.length;
if (!window.confirm(`Delete all ${count} images? This will reset the gallery database.`)) return;
try {
console.log("[Gallery] Hard clearing...");
// 1. Clear Zustand Store visual state immediate
clearGallery();
// 2. Nuclear Option: Delete the entire database file
console.log("[Gallery] Deleting IndexedDB...");
const req = window.indexedDB.deleteDatabase('kv-pix-db');
req.onsuccess = () => {
console.log("✅ DB Deleted successfully");
// Clear localStorage persistence too just in case
localStorage.removeItem('kv-pix-storage');
window.location.reload();
};
req.onerror = (e) => {
console.error("❌ Failed to delete DB", e);
alert("Failed to delete database. Browser might be blocking it.");
window.location.reload();
};
req.onblocked = () => {
console.warn("⚠️ DB Delete blocked - reloading to free locks");
window.location.reload();
};
} catch (e) {
console.error("[Gallery] Delete error:", e);
alert("❌ Failed to delete: " + String(e));
}
};
@ -377,7 +535,7 @@ export function Gallery() {
)}
{selectedIndex < gallery.length - 1 && (
<button
className="absolute right-2 md:right-4 top-1/2 -translate-y-1/2 p-2 md:p-3 bg-white/10 hover:bg-white/20 rounded-full text-white transition-colors z-50"
className="absolute left-[calc(50%-2rem)] md:left-[calc(50%+8rem)] top-1/2 -translate-y-1/2 p-2 md:p-3 bg-white/10 hover:bg-white/20 rounded-full text-white transition-colors z-50"
onClick={(e) => { e.stopPropagation(); setSelectedIndex(prev => prev! + 1); }}
>
<ChevronRight className="h-6 w-6 md:h-8 md:w-8" />
@ -415,88 +573,139 @@ export function Gallery() {
</div>
)}
{/* Prompt Section */}
{/* Prompt Section (Editable) */}
<div className="space-y-2">
<div className="flex items-center justify-between">
<h3 className="text-xs font-medium text-white/50 uppercase tracking-wider">Prompt</h3>
<p className="text-white/90 text-sm leading-relaxed">
{selectedImage.prompt || "No prompt available"}
</p>
{editPromptValue !== selectedImage.prompt && (
<span className="text-[10px] text-amber-400 font-medium animate-pulse">Modified</span>
)}
</div>
<textarea
value={editPromptValue}
onChange={(e) => setEditPromptValue(e.target.value)}
className="w-full h-24 bg-black/20 border border-white/10 rounded-lg p-3 text-sm text-white resize-none focus:ring-1 focus:ring-amber-500/30 outline-none placeholder:text-white/20"
placeholder="Enter prompt..."
/>
<div className="flex gap-2">
{(!selectedImage.provider || selectedImage.provider === 'whisk' || selectedImage.provider === 'meta') && (
<button
onClick={() => openEditModal({ ...selectedImage, prompt: editPromptValue })}
className="flex-1 py-2 bg-gradient-to-r from-amber-600 to-orange-600 hover:from-amber-500 hover:to-orange-500 rounded-lg text-xs font-medium text-white transition-all flex items-center justify-center gap-2"
>
<Wand2 className="h-3 w-3" />
<span>Remix</span>
</button>
)}
<button
onClick={() => {
navigator.clipboard.writeText(editPromptValue);
alert("Prompt copied!");
}}
className={cn(
"px-3 py-2 bg-white/10 hover:bg-white/20 rounded-lg text-white transition-colors",
(!selectedImage.provider || selectedImage.provider === 'whisk') ? "" : "flex-1"
)}
title="Copy Prompt"
>
<Copy className="h-4 w-4 mx-auto" />
</button>
</div>
</div>
{/* Divider */}
<div className="border-t border-white/10" />
{/* Actions */}
<div className="space-y-3">
<h3 className="text-xs font-medium text-white/50 uppercase tracking-wider">Actions</h3>
{/* Video Generation Section */}
<div className="space-y-2">
<div className="flex items-center justify-between">
<h3 className="text-xs font-medium text-white/50 uppercase tracking-wider flex items-center gap-2">
<Film className="h-3 w-3" />
Animate
</h3>
</div>
<textarea
value={videoPromptValue}
onChange={(e) => setVideoPromptValue(e.target.value)}
placeholder="Describe movement (e.g. natural movement, zoom in)..."
className="w-full h-20 bg-black/20 border border-white/10 rounded-lg p-3 text-sm text-white resize-none focus:ring-1 focus:ring-purple-500/50 outline-none placeholder:text-white/30"
/>
{(() => {
const isGenerating = isGeneratingMetaVideo || isGeneratingWhiskVideo;
const isWhisk = !selectedImage.provider || selectedImage.provider === 'whisk';
const isMeta = selectedImage.provider === 'meta';
const is16by9 = selectedImage.aspectRatio === '16:9';
// Only Whisk with 16:9 can generate video - Meta video API not available
const canGenerate = isWhisk && is16by9;
{/* Download */}
<a
href={getImageSrc(selectedImage.data)}
download={"generated-" + selectedIndex + "-" + Date.now() + ".png"}
className="flex items-center gap-3 w-full px-4 py-3 bg-white/10 hover:bg-white/15 rounded-lg text-white font-medium transition-colors"
>
<Download className="h-5 w-5 text-green-400" />
<span>Download Image</span>
</a>
{/* Generate Video - Show for all providers */}
return (
<button
onClick={() => {
if (selectedImage.provider === 'meta') {
handleGenerateMetaVideo(selectedImage);
} else {
openVideoModal(selectedImage);
}
}}
disabled={isGeneratingMetaVideo}
onClick={() => handleGenerateVideo(videoPromptValue, selectedImage)}
disabled={isGenerating || !canGenerate}
className={cn(
"flex items-center gap-3 w-full px-4 py-3 rounded-lg text-white font-medium transition-all",
isGeneratingMetaVideo
"relative z-10 w-full py-2 rounded-lg text-xs font-medium text-white transition-all flex items-center justify-center gap-2",
isGenerating
? "bg-gray-600 cursor-wait"
: selectedImage.provider === 'meta'
? "bg-gradient-to-r from-blue-600 to-cyan-600 hover:from-blue-500 hover:to-cyan-500"
: "bg-gradient-to-r from-purple-600 to-indigo-600 hover:from-purple-500 hover:to-indigo-500"
: !canGenerate
? "bg-gray-600/50 cursor-not-allowed opacity-60"
: "bg-purple-600 hover:bg-purple-500"
)}
>
{isGeneratingMetaVideo ? (
{isGenerating ? (
<>
<div className="h-5 w-5 animate-spin rounded-full border-2 border-white border-t-transparent" />
<div className="h-3 w-3 animate-spin rounded-full border-2 border-white border-t-transparent" />
<span>Generating Video...</span>
</>
) : isMeta ? (
<>
<Film className="h-3.5 w-3.5 opacity-50" />
<span>Video coming soon</span>
</>
) : !canGenerate ? (
<>
<Film className="h-3.5 w-3.5 opacity-50" />
<span>Video requires 16:9 ratio</span>
</>
) : (
<>
<Film className="h-5 w-5" />
<Film className="h-3.5 w-3.5" />
<span>Generate Video</span>
</>
)}
</button>
);
})()}
</div>
{/* Remix/Edit - Only for Whisk */}
{(!selectedImage.provider || selectedImage.provider === 'whisk') && (
<button
onClick={() => openEditModal(selectedImage)}
className="flex items-center gap-3 w-full px-4 py-3 bg-gradient-to-r from-amber-600 to-orange-600 hover:from-amber-500 hover:to-orange-500 rounded-lg text-white font-medium transition-all"
{/* Divider */}
<div className="border-t border-white/10" />
{/* Other Actions */}
<div className="space-y-2">
<h3 className="text-xs font-medium text-white/50 uppercase tracking-wider">Other Actions</h3>
<div className="grid grid-cols-2 gap-2">
<a
href={getImageSrc(selectedImage.data)}
download={"generated-" + selectedIndex + "-" + Date.now() + ".png"}
className="flex items-center justify-center gap-2 px-3 py-2 bg-white/5 hover:bg-white/10 rounded-lg text-white/80 text-xs font-medium transition-colors"
>
<Wand2 className="h-5 w-5" />
<span>Remix / Edit</span>
</button>
)}
<Download className="h-3.5 w-3.5" />
<span>Download</span>
</a>
{/* Use Prompt */}
<button
onClick={() => {
setPrompt(selectedImage.prompt);
navigator.clipboard.writeText(selectedImage.prompt);
setSelectedIndex(null);
}}
className="flex items-center gap-3 w-full px-4 py-3 bg-white/10 hover:bg-white/15 rounded-lg text-white font-medium transition-colors"
className="flex items-center justify-center gap-2 px-3 py-2 bg-white/5 hover:bg-white/10 rounded-lg text-white/80 text-xs font-medium transition-colors"
>
<Copy className="h-5 w-5 text-purple-400" />
<span>Copy & Use Prompt</span>
<Sparkles className="h-3.5 w-3.5" />
<span>Use Prompt</span>
</button>
</div>
{/* Delete */}
<button
onClick={() => {
if (selectedImage.id) {
@ -504,9 +713,9 @@ export function Gallery() {
setSelectedIndex(null);
}
}}
className="flex items-center gap-3 w-full px-4 py-3 bg-red-500/10 hover:bg-red-500/20 rounded-lg text-red-400 font-medium transition-colors border border-red-500/20"
className="flex items-center justify-center gap-2 w-full px-3 py-2 bg-red-500/10 hover:bg-red-500/20 rounded-lg text-red-400 text-xs font-medium transition-colors border border-red-500/20"
>
<Trash2 className="h-5 w-5" />
<Trash2 className="h-3.5 w-3.5" />
<span>Delete Image</span>
</button>
</div>

View file

@ -2,7 +2,7 @@
import React, { useState, useRef, useEffect } from 'react';
import { motion, AnimatePresence } from 'framer-motion';
import { MessageCircle, X, Send, MinusSquare, Maximize2, Minimize2, Loader2, Bot } from 'lucide-react';
import { X, Send, Maximize2, Minimize2, Loader2, Bot, Zap, Brain } from 'lucide-react';
import { cn } from '@/lib/utils';
import { useStore } from '@/lib/store';
@ -11,12 +11,20 @@ interface Message {
content: string;
}
type AIProvider = 'grok' | 'meta';
const aiProviders = [
{ id: 'grok' as AIProvider, name: 'Grok', icon: Zap, color: 'text-purple-400' },
{ id: 'meta' as AIProvider, name: 'Llama 3', icon: Brain, color: 'text-blue-400' },
];
export function GrokChat() {
const [isOpen, setIsOpen] = useState(false);
const [isMinimized, setIsMinimized] = useState(false);
const [messages, setMessages] = useState<Message[]>([]);
const [input, setInput] = useState('');
const [isLoading, setIsLoading] = useState(false);
const [selectedAI, setSelectedAI] = useState<AIProvider>('grok');
const messagesEndRef = useRef<HTMLDivElement>(null);
const inputRef = useRef<HTMLInputElement>(null);
@ -41,61 +49,42 @@ export function GrokChat() {
setIsLoading(true);
try {
// Retrieve history for context (optional, limiting to last 10 messages)
const history = messages.slice(-10).map(m => ({ role: m.role, content: m.content }));
// Get cookies from store
const { settings } = useStore.getState();
const grokCookies = settings.grokCookies;
// Parse cookies string to object if retrieved from text area (simple key=value parsing)
let cookieObj: Record<string, string> = {};
if (grokCookies) {
// Basic parsing for "name=value; name2=value2" or JSON
try {
// Try JSON first
const parsed = JSON.parse(grokCookies);
let res: Response;
if (Array.isArray(parsed)) {
// Handle standard cookie export format (list of objects)
parsed.forEach((c: any) => {
if (c.name && c.value) {
cookieObj[c.name] = c.value;
}
});
} else if (typeof parsed === 'object' && parsed !== null) {
// Handle direct key-value object
// Cast to ensure type compatibility if needed, though 'parsed' is anyish here
cookieObj = parsed as Record<string, string>;
}
} catch {
// Try semicolon separated
grokCookies.split(';').forEach((c: string) => {
const parts = c.trim().split('=');
if (parts.length >= 2) {
const key = parts[0].trim();
const val = parts.slice(1).join('=').trim();
if (key && val) cookieObj[key] = val;
}
});
}
}
if (selectedAI === 'grok') {
// Use Grok via xLmiler backend
const grokApiUrl = settings.grokApiUrl || 'http://localhost:3000';
const apiKey = settings.grokApiKey;
const res = await fetch('/api/grok-debug', {
res = await fetch('/api/grok-chat', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
message: userMsg,
history: history,
cookies: cookieObj,
userAgent: navigator.userAgent
history,
grokApiUrl,
apiKey
})
});
} else {
// Use Meta AI (Llama 3)
res = await fetch('/api/meta-chat', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
message: userMsg,
history,
metaCookies: settings.metaCookies
})
});
}
const data = await res.json();
if (data.error || data.detail) {
// Handle both simple error string and FastAPI detail array
const errorMsg = data.error || JSON.stringify(data.detail);
throw new Error(errorMsg);
}
@ -103,10 +92,10 @@ export function GrokChat() {
setMessages(prev => [...prev, { role: 'assistant', content: data.response }]);
} catch (error: any) {
console.error('Grok Chat Error:', error);
console.error('Chat Error:', error);
setMessages(prev => [...prev, {
role: 'assistant',
content: `Error: ${error.message || 'Failed to connect to Grok.'}`
content: `Error: ${error.message || 'Failed to connect.'}`
}]);
} finally {
setIsLoading(false);
@ -120,6 +109,8 @@ export function GrokChat() {
}
};
const currentProvider = aiProviders.find(p => p.id === selectedAI)!;
return (
<div className="fixed bottom-4 right-4 z-[100] flex flex-col items-end pointer-events-none">
@ -147,27 +138,45 @@ export function GrokChat() {
y: 0,
scale: 1,
height: isMinimized ? 'auto' : '500px',
width: isMinimized ? '300px' : '380px'
width: isMinimized ? '300px' : '400px'
}}
exit={{ opacity: 0, y: 20, scale: 0.9 }}
className="pointer-events-auto bg-black/90 backdrop-blur-xl border border-white/10 rounded-2xl shadow-2xl overflow-hidden flex flex-col"
>
{/* Header */}
<div className="flex items-center justify-between p-4 border-b border-white/10 bg-white/5 cursor-pointer"
onClick={() => setIsMinimized(!isMinimized)}>
<div className="flex items-center gap-2">
<Bot className="h-5 w-5 text-purple-400" />
<span className="font-bold text-white tracking-wide">Grok AI</span>
<div className="flex items-center justify-between p-4 border-b border-white/10 bg-white/5">
<div className="flex items-center gap-3">
{/* AI Selector */}
<div className="flex bg-white/5 rounded-lg p-0.5">
{aiProviders.map((provider) => (
<button
key={provider.id}
onClick={() => {
setSelectedAI(provider.id);
setMessages([]); // Clear history on switch
}}
className={cn(
"flex items-center gap-1.5 px-2.5 py-1.5 rounded-md text-xs font-medium transition-all",
selectedAI === provider.id
? "bg-white/10 text-white"
: "text-white/50 hover:text-white/80"
)}
>
<provider.icon className={cn("h-3.5 w-3.5", selectedAI === provider.id && provider.color)} />
{provider.name}
</button>
))}
</div>
</div>
<div className="flex items-center gap-1">
<button
onClick={(e) => { e.stopPropagation(); setIsMinimized(!isMinimized); }}
onClick={() => setIsMinimized(!isMinimized)}
className="p-1.5 hover:bg-white/10 rounded-md text-white/70 hover:text-white transition-colors"
>
{isMinimized ? <Maximize2 className="h-4 w-4" /> : <Minimize2 className="h-4 w-4" />}
</button>
<button
onClick={(e) => { e.stopPropagation(); setIsOpen(false); }}
onClick={() => setIsOpen(false)}
className="p-1.5 hover:bg-red-500/20 hover:text-red-400 rounded-md text-white/70 transition-colors"
>
<X className="h-4 w-4" />
@ -181,8 +190,8 @@ export function GrokChat() {
<div className="flex-1 overflow-y-auto p-4 space-y-4 custom-scrollbar">
{messages.length === 0 && (
<div className="flex flex-col items-center justify-center h-full text-center text-white/30 space-y-2">
<Bot className="h-12 w-12 opacity-20" />
<p className="text-sm">Ask Grok anything...</p>
<currentProvider.icon className={cn("h-12 w-12 opacity-50", currentProvider.color)} />
<p className="text-sm">Ask {currentProvider.name} anything...</p>
</div>
)}
{messages.map((msg, idx) => (
@ -193,7 +202,9 @@ export function GrokChat() {
<div className={cn(
"max-w-[85%] rounded-2xl px-4 py-2.5 text-sm leading-relaxed",
msg.role === 'user'
? selectedAI === 'grok'
? "bg-purple-600/80 text-white rounded-br-sm"
: "bg-blue-600/80 text-white rounded-br-sm"
: "bg-white/10 text-white/90 rounded-bl-sm"
)}>
{msg.content}
@ -203,8 +214,8 @@ export function GrokChat() {
{isLoading && (
<div className="flex justify-start">
<div className="bg-white/5 rounded-2xl px-4 py-2 flex items-center gap-2">
<Loader2 className="h-4 w-4 animate-spin text-purple-400" />
<span className="text-xs text-white/50">Computing...</span>
<Loader2 className={cn("h-4 w-4 animate-spin", currentProvider.color)} />
<span className="text-xs text-white/50">Thinking...</span>
</div>
</div>
)}
@ -220,14 +231,24 @@ export function GrokChat() {
value={input}
onChange={(e) => setInput(e.target.value)}
onKeyDown={handleKeyDown}
placeholder="Type a message..."
className="flex-1 bg-black/50 border border-white/10 rounded-xl px-4 py-2.5 text-sm text-white focus:outline-none focus:border-purple-500/50 focus:ring-1 focus:ring-purple-500/20 transition-all placeholder:text-white/20"
placeholder={`Message ${currentProvider.name}...`}
className={cn(
"flex-1 bg-black/50 border border-white/10 rounded-xl px-4 py-2.5 text-sm text-white focus:outline-none transition-all placeholder:text-white/20",
selectedAI === 'grok'
? "focus:border-purple-500/50 focus:ring-1 focus:ring-purple-500/20"
: "focus:border-blue-500/50 focus:ring-1 focus:ring-blue-500/20"
)}
disabled={isLoading}
/>
<button
onClick={handleSend}
disabled={!input.trim() || isLoading}
className="p-2.5 bg-purple-600 hover:bg-purple-500 disabled:opacity-50 disabled:cursor-not-allowed rounded-xl text-white transition-colors shadow-lg shadow-purple-900/20"
className={cn(
"p-2.5 disabled:opacity-50 disabled:cursor-not-allowed rounded-xl text-white transition-colors shadow-lg",
selectedAI === 'grok'
? "bg-purple-600 hover:bg-purple-500 shadow-purple-900/20"
: "bg-blue-600 hover:bg-blue-500 shadow-blue-900/20"
)}
>
<Send className="h-4 w-4" />
</button>

View file

@ -3,7 +3,7 @@
import React, { useRef, useState, useEffect } from "react";
import { useStore, ReferenceCategory } from "@/lib/store";
import { cn } from "@/lib/utils";
import { Sparkles, Maximize2, X, Hash, AlertTriangle, Upload, Zap, Brain, Settings, Settings2, Video } from "lucide-react";
import { Sparkles, Maximize2, X, Hash, AlertTriangle, Upload, Brain, Settings, Settings2 } from "lucide-react";
const IMAGE_COUNTS = [1, 2, 4];
@ -19,13 +19,41 @@ export function PromptHero() {
} = useStore();
const [isGenerating, setLocalIsGenerating] = useState(false);
const [isGeneratingVideo, setIsGeneratingVideo] = useState(false);
const { addVideo } = useStore();
const [uploadingRefs, setUploadingRefs] = useState<Record<string, boolean>>({});
const [errorNotification, setErrorNotification] = useState<{ message: string; type: 'error' | 'warning' } | null>(null);
const textareaRef = useRef<HTMLTextAreaElement>(null);
// CLEANUP: Remove corrupted localStorage keys that crash browser extensions
useEffect(() => {
try {
if (typeof window !== 'undefined') {
for (let i = 0; i < localStorage.length; i++) {
const key = localStorage.key(i);
if (key) {
const val = localStorage.getItem(key);
// Clean up "undefined" string values which cause JSON.parse errors in extensions
if (val === "undefined" || val === "null") {
console.warn(`[Cleanup] Removing corrupted localStorage key: ${key}`);
localStorage.removeItem(key);
}
}
}
}
} catch (e) {
console.error("Storage cleanup failed", e);
}
}, []);
// Auto-enable Precise mode when references are added
useEffect(() => {
const hasReferences = Object.values(references).some(refs => refs && refs.length > 0);
if (hasReferences && !settings.preciseMode) {
setSettings({ preciseMode: true });
}
}, [references, settings.preciseMode, setSettings]);
// File input refs for each reference category
const fileInputRefs = {
subject: useRef<HTMLInputElement>(null),
@ -67,28 +95,46 @@ export function PromptHero() {
const provider = settings.provider || 'whisk';
let res: Response;
if (provider === 'grok') {
// Grok API
res = await fetch('/api/grok/generate', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
prompt: finalPrompt,
apiKey: settings.grokApiKey,
cookies: settings.grokCookies,
imageCount: settings.imageCount
})
});
} else if (provider === 'meta') {
// Meta AI via Python service (metaai-api)
if (provider === 'meta') {
// Image Generation Path (Meta AI)
// Video is now handled by handleGenerateVideo
// Prepend aspect ratio for better adherence
let metaPrompt = finalPrompt;
if (settings.aspectRatio === '16:9') {
metaPrompt = "wide 16:9 landscape image of " + finalPrompt;
} else if (settings.aspectRatio === '9:16') {
metaPrompt = "tall 9:16 portrait image of " + finalPrompt;
}
// Merge cookies safely
let mergedCookies = settings.metaCookies;
try {
const safeParse = (str: string) => {
if (!str || str === "undefined" || str === "null") return [];
try { return JSON.parse(str); } catch { return []; }
};
const m = safeParse(settings.metaCookies);
const f = safeParse(settings.facebookCookies);
if (Array.isArray(m) || Array.isArray(f)) {
mergedCookies = [...(Array.isArray(m) ? m : []), ...(Array.isArray(f) ? f : [])];
}
} catch (e) { console.error("Cookie merge failed", e); }
// Meta AI always generates 4 images, hardcode this
res = await fetch('/api/meta-crawl', {
// Extract subject reference if available (for Image-to-Image)
const subjectRef = references.subject?.[0];
const imageUrl = subjectRef ? subjectRef.thumbnail : undefined; // Use full data URI from thumbnail property
res = await fetch('/api/meta/generate', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
prompt: finalPrompt,
cookies: settings.metaCookies,
num_images: 4 // Meta AI always returns 4 images
prompt: metaPrompt,
cookies: typeof mergedCookies === 'string' ? mergedCookies : JSON.stringify(mergedCookies),
imageCount: 4, // Meta AI always returns 4 images
useMetaFreeWrapper: settings.useMetaFreeWrapper,
metaFreeWrapperUrl: settings.metaFreeWrapperUrl
})
});
} else {
@ -129,10 +175,10 @@ export function PromptHero() {
for (const img of data.images) {
await addToGallery({
data: img.data || img.url, // Use URL as fallback (Meta AI returns URLs)
prompt: img.prompt,
prompt: finalPrompt, // Use original user prompt to avoid showing engineered prompts
aspectRatio: img.aspectRatio || settings.aspectRatio,
createdAt: Date.now(),
provider: provider as 'whisk' | 'grok' | 'meta'
provider: provider as 'whisk' | 'meta'
});
}
}
@ -207,79 +253,7 @@ export function PromptHero() {
}
};
// Kick off Meta AI image-to-video generation (Meta provider only).
// The first Subject reference image is used as the source frame.
const handleGenerateVideo = async () => {
  const subjectRefs = references.subject || [];
  // A Subject image is mandatory for image-to-video.
  if (subjectRefs.length === 0) {
    setErrorNotification({ message: '📷 Please upload a Subject image first', type: 'warning' });
    setTimeout(() => setErrorNotification(null), 4000);
    return;
  }
  // Fall back to a generic animation prompt when the box is empty.
  const finalPrompt = prompt.trim() || "Animate this image with natural, cinematic movement";
  // Meta cookies are required to authenticate the request.
  if (!settings.metaCookies) {
    setShowCookieExpired(true);
    return;
  }
  setIsGeneratingVideo(true);
  setIsGenerating(true);
  try {
    const imageBase64 = subjectRefs[0].thumbnail;
    console.log('[PromptHero] Starting Meta AI image-to-video...');
    const res = await fetch('/api/meta/video', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        prompt: finalPrompt,
        cookies: settings.metaCookies,
        imageBase64: imageBase64
      })
    });
    const data = await res.json();
    if (data.error) throw new Error(data.error);
    const videos = data.success ? (data.videos ?? []) : [];
    if (videos.length === 0) throw new Error('No videos generated');
    for (const video of videos) {
      addVideo({
        id: crypto.randomUUID(),
        url: video.url,
        prompt: video.prompt || finalPrompt,
        thumbnail: imageBase64, // reuse the source image as the thumbnail
        createdAt: Date.now()
      });
    }
    setErrorNotification({
      message: `🎬 Video generated! Check the gallery.`,
      type: 'warning'
    });
    setTimeout(() => setErrorNotification(null), 5000);
  } catch (e: any) {
    console.error('[Video Gen]', e);
    const errorMessage = e.message || 'Video generation failed';
    // 401 / cookie errors mean the Meta session expired — prompt re-auth.
    if (errorMessage.includes('401') || errorMessage.includes('cookies')) {
      setShowCookieExpired(true);
    }
    setErrorNotification({
      message: `🎬 Video Error: ${errorMessage}`,
      type: 'error'
    });
    setTimeout(() => setErrorNotification(null), 8000);
  } finally {
    setIsGeneratingVideo(false);
    setIsGenerating(false);
  }
};
// Note: Meta AI Video generation was removed - use Whisk for video generation from the gallery lightbox
const handleKeyDown = (e: React.KeyboardEvent) => {
if (e.key === 'Enter' && (e.metaKey || e.ctrlKey)) {
@ -322,17 +296,27 @@ export function PromptHero() {
};
const uploadReference = async (file: File, category: ReferenceCategory) => {
if (!settings.whiskCookies) {
// Enforce Whisk cookies ONLY if using Whisk provider
if ((!settings.provider || settings.provider === 'whisk') && !settings.whiskCookies) {
alert("Please set your Whisk Cookies in Settings first!");
return;
}
setUploadingRefs(prev => ({ ...prev, [category]: true }));
try {
const reader = new FileReader();
reader.onload = async (e) => {
const base64 = e.target?.result as string;
if (!base64) return;
if (!base64) {
setUploadingRefs(prev => ({ ...prev, [category]: false }));
return;
}
let refId = '';
// If Whisk, upload to backend to get ID
if (!settings.provider || settings.provider === 'whisk') {
try {
const res = await fetch('/api/references/upload', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
@ -343,29 +327,40 @@ export function PromptHero() {
cookies: settings.whiskCookies
})
});
const data = await res.json();
if (data.id) {
// Add to array (supports multiple refs per category)
addReference(category, { id: data.id, thumbnail: base64 });
// Add to history
const newItem = {
id: data.id,
url: base64, // For local display history we use base64. Ideally we'd valid URL but this works for session.
category: category,
originalName: file.name
};
// exist check?
const exists = history.find(h => h.id === data.id);
if (!exists) {
setHistory([newItem, ...history]);
}
refId = data.id;
} else {
console.error("Upload failed details:", JSON.stringify(data));
alert(`Upload failed: ${data.error}\n\nDetails: ${JSON.stringify(data) || 'Check console'}`);
}
} catch (err) {
console.error("API Upload Error", err);
alert("API Upload failed");
}
} else {
// For Meta/Grok, just use local generated ID
refId = 'loc-' + Date.now() + Math.random().toString(36).substr(2, 5);
}
if (refId) {
// Add to array (supports multiple refs per category)
// Note: Store uses 'thumbnail' property for the image data
addReference(category, { id: refId, thumbnail: base64 });
// Add to history
const newItem = {
id: refId,
url: base64,
category: category,
originalName: file.name
};
const exists = history.find(h => h.id === refId);
if (!exists) {
setHistory([newItem, ...history].slice(0, 50));
}
}
setUploadingRefs(prev => ({ ...prev, [category]: false }));
};
reader.readAsDataURL(file);
@ -476,9 +471,7 @@ export function PromptHero() {
<div className="flex items-center justify-between mb-1">
<div className="flex items-center gap-3">
<div className="h-8 w-8 rounded-lg bg-gradient-to-br from-amber-500/20 to-purple-600/20 border border-white/5 flex items-center justify-center">
{settings.provider === 'grok' ? (
<Zap className="h-4 w-4 text-yellow-400" />
) : settings.provider === 'meta' ? (
{settings.provider === 'meta' ? (
<Brain className="h-4 w-4 text-blue-400" />
) : (
<Sparkles className="h-4 w-4 text-amber-300" />
@ -489,12 +482,10 @@ export function PromptHero() {
Create
<span className="text-[10px] font-medium text-white/40 border-l border-white/10 pl-2">
by <span className={cn(
settings.provider === 'grok' ? "text-yellow-400" :
settings.provider === 'meta' ? "text-blue-400" :
"text-amber-300"
)}>
{settings.provider === 'grok' ? 'Grok' :
settings.provider === 'meta' ? 'Meta AI' :
{settings.provider === 'meta' ? 'Meta AI' :
'Whisk'}
</span>
</span>
@ -517,19 +508,6 @@ export function PromptHero() {
<Sparkles className="h-3 w-3" />
<span className="hidden sm:inline">Whisk</span>
</button>
<button
onClick={() => setSettings({ provider: 'grok' })}
className={cn(
"flex items-center gap-1.5 px-2.5 py-1 rounded-md text-[10px] font-medium transition-all",
settings.provider === 'grok'
? "bg-white/10 text-white shadow-sm"
: "text-white/40 hover:text-white/70 hover:bg-white/5"
)}
title="Grok (xAI)"
>
<Zap className="h-3 w-3" />
<span className="hidden sm:inline">Grok</span>
</button>
<button
onClick={() => setSettings({ provider: 'meta' })}
className={cn(
@ -566,17 +544,15 @@ export function PromptHero() {
{/* Left Controls: References */}
{/* For Meta AI: Only Subject is enabled (for video generation), Scene/Style disabled */}
<div className="flex flex-wrap gap-2">
{(['subject', 'scene', 'style'] as ReferenceCategory[]).map((cat) => {
{((settings.provider === 'meta'
? ['subject']
: ['subject', 'scene', 'style']) as ReferenceCategory[]).map((cat) => {
const refs = references[cat] || [];
const hasRefs = refs.length > 0;
const isUploading = uploadingRefs[cat];
// For Meta AI: only Subject is enabled (for image-to-video), Scene/Style disabled
const isDisabledForMeta = settings.provider === 'meta' && cat !== 'subject';
return (
<div key={cat} className={cn(
"relative group",
isDisabledForMeta && "opacity-30 pointer-events-none grayscale"
)}>
<div key={cat} className="relative group">
<button
onClick={() => toggleReference(cat)}
onDragOver={handleDragOver}
@ -655,6 +631,8 @@ export function PromptHero() {
onChange={(e) => handleFileInputChange(e, 'style')}
/>
{/* Right Controls: Settings & Generate */}
<div className="flex flex-wrap items-center gap-2 w-full md:w-auto justify-end">
@ -662,12 +640,17 @@ export function PromptHero() {
<div className="flex items-center gap-0.5 bg-[#0E0E10] p-1 rounded-lg border border-white/10">
{/* Image Count */}
<button
onClick={cycleImageCount}
className="flex items-center gap-1 px-2 py-1 rounded-md text-[10px] font-medium text-white/60 hover:text-white hover:bg-white/5 transition-colors"
title="Number of images"
onClick={settings.provider === 'meta' ? undefined : cycleImageCount}
className={cn(
"flex items-center gap-1 px-2 py-1 rounded-md text-[10px] font-medium transition-colors",
settings.provider === 'meta'
? "text-blue-200/50 cursor-not-allowed"
: "text-white/60 hover:text-white hover:bg-white/5"
)}
title={settings.provider === 'meta' ? "Meta AI always generates 4 images" : "Number of images"}
>
<Hash className="h-3 w-3 opacity-70" />
<span>{settings.imageCount}</span>
<span>{settings.provider === 'meta' ? 4 : settings.imageCount}</span>
</button>
<div className="w-px h-3 bg-white/10 mx-1" />
@ -703,12 +686,10 @@ export function PromptHero() {
{/* Generate Button */}
<button
onClick={handleGenerate}
disabled={isGenerating || !prompt.trim() || settings.provider === 'grok'}
disabled={isGenerating || !prompt.trim()}
className={cn(
"relative overflow-hidden px-4 py-1.5 rounded-lg font-bold text-sm text-white shadow-lg transition-all active:scale-95 group border border-white/10",
settings.provider === 'grok'
? "bg-gray-700 cursor-not-allowed"
: "bg-gradient-to-r from-purple-600 to-indigo-600 hover:from-purple-500 hover:to-indigo-500 hover:shadow-indigo-500/25"
"bg-gradient-to-r from-purple-600 to-indigo-600 hover:from-purple-500 hover:to-indigo-500 hover:shadow-indigo-500/25"
)}
>
<div className="relative z-10 flex items-center gap-1.5">
@ -717,10 +698,6 @@ export function PromptHero() {
<div className="h-3 w-3 animate-spin rounded-full border-2 border-white border-t-transparent" />
<span className="animate-pulse">Dreaming...</span>
</>
) : settings.provider === 'grok' ? (
<>
<span className="opacity-80">Soon</span>
</>
) : (
<>
<Sparkles className="h-3 w-3 group-hover:rotate-12 transition-transform" />
@ -730,34 +707,7 @@ export function PromptHero() {
</div>
</button>
{/* Generate Video Button - Only for Meta AI when Subject is uploaded */}
{settings.provider === 'meta' && (references.subject?.length ?? 0) > 0 && (
<button
onClick={handleGenerateVideo}
disabled={isGenerating}
className={cn(
"relative overflow-hidden px-4 py-1.5 rounded-lg font-bold text-sm text-white shadow-lg transition-all active:scale-95 group border border-white/10",
isGenerating
? "bg-gray-700 cursor-not-allowed"
: "bg-gradient-to-r from-blue-600 to-cyan-600 hover:from-blue-500 hover:to-cyan-500 hover:shadow-cyan-500/25"
)}
title="Animate the subject image into video"
>
<div className="relative z-10 flex items-center gap-1.5">
{isGeneratingVideo ? (
<>
<div className="h-3 w-3 animate-spin rounded-full border-2 border-white border-t-transparent" />
<span className="animate-pulse">Creating...</span>
</>
) : (
<>
<Video className="h-3 w-3 group-hover:scale-110 transition-transform" />
<span>Video</span>
</>
)}
</div>
</button>
)}
</div>
</div>

View file

@ -2,14 +2,13 @@
import React from 'react';
import { useStore } from '@/lib/store';
import { Save, Sparkles, Zap, Brain } from 'lucide-react';
import { Save, Sparkles, Brain, Settings2 } from 'lucide-react';
import { cn } from '@/lib/utils';
type Provider = 'whisk' | 'grok' | 'meta';
type Provider = 'whisk' | 'meta';
const providers: { id: Provider; name: string; icon: any; description: string }[] = [
{ id: 'whisk', name: 'Google Whisk', icon: Sparkles, description: 'ImageFX / Imagen 3' },
// { id: 'grok', name: 'Grok (xAI)', icon: Zap, description: 'FLUX.1 model' },
{ id: 'meta', name: 'Meta AI', icon: Brain, description: 'Imagine / Emu' },
];
@ -19,18 +18,20 @@ export function Settings() {
// Local state for form fields
const [provider, setProvider] = React.useState<Provider>(settings.provider || 'whisk');
const [whiskCookies, setWhiskCookies] = React.useState(settings.whiskCookies || '');
const [grokApiKey, setGrokApiKey] = React.useState(settings.grokApiKey || '');
const [grokCookies, setGrokCookies] = React.useState(settings.grokCookies || '');
const [useMetaFreeWrapper, setUseMetaFreeWrapper] = React.useState(settings.useMetaFreeWrapper !== undefined ? settings.useMetaFreeWrapper : true);
const [metaFreeWrapperUrl, setMetaFreeWrapperUrl] = React.useState(settings.metaFreeWrapperUrl || 'http://localhost:8000');
const [metaCookies, setMetaCookies] = React.useState(settings.metaCookies || '');
const [facebookCookies, setFacebookCookies] = React.useState(settings.facebookCookies || '');
const [saved, setSaved] = React.useState(false);
const handleSave = () => {
setSettings({
provider,
whiskCookies,
grokApiKey,
grokCookies,
metaCookies
useMetaFreeWrapper,
metaFreeWrapperUrl,
metaCookies,
facebookCookies
});
setSaved(true);
setTimeout(() => setSaved(false), 2000);
@ -89,47 +90,54 @@ export function Settings() {
</div>
)}
{provider === 'grok' && (
{provider === 'meta' && (
<div className="space-y-4">
{/* Advanced Settings (Hidden by default) */}
<details className="group mb-4">
<summary className="flex items-center gap-2 cursor-pointer text-xs text-white/40 hover:text-white/60 mb-2 select-none">
<Settings2 className="h-3 w-3" />
<span>Advanced Configuration</span>
</summary>
<div className="pl-4 border-l border-white/5 space-y-4 mb-4">
<div className="flex items-center justify-between p-3 rounded-lg bg-secondary/30 border border-border/50">
<div className="space-y-0.5">
<label className="text-sm font-medium text-white/70">Use Free API Wrapper</label>
<p className="text-[10px] text-muted-foreground">Running locally via Docker</p>
</div>
<div className="flex items-center gap-2">
<span className={`text-xs ${useMetaFreeWrapper ? "text-primary font-medium" : "text-muted-foreground"}`}>{useMetaFreeWrapper ? "ON" : "OFF"}</span>
<button
onClick={() => setUseMetaFreeWrapper(!useMetaFreeWrapper)}
className={`relative inline-flex h-5 w-9 items-center rounded-full transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50 ${useMetaFreeWrapper ? "bg-primary" : "bg-input"}`}
>
<span className={`pointer-events-none block h-3.5 w-3.5 rounded-full bg-background shadow-lg ring-0 transition-transform ${useMetaFreeWrapper ? "translate-x-4" : "translate-x-0.5"}`} />
</button>
</div>
</div>
{useMetaFreeWrapper && (
<div className="space-y-2">
<label className="text-sm font-medium">Grok API Key (Recommended)</label>
<label className="text-sm font-medium text-white/70">Free Wrapper URL</label>
<input
type="password"
value={grokApiKey}
onChange={(e) => setGrokApiKey(e.target.value)}
placeholder="xai-..."
className="w-full p-3 rounded-lg bg-secondary/50 border border-border focus:ring-2 focus:ring-primary/50 outline-none font-mono text-sm"
type="text"
value={metaFreeWrapperUrl}
onChange={(e) => setMetaFreeWrapperUrl(e.target.value)}
placeholder="http://localhost:8000"
className="w-full p-2 rounded-lg bg-secondary/30 border border-border/50 focus:ring-1 focus:ring-primary/50 outline-none font-mono text-xs text-white/60"
/>
<p className="text-xs text-muted-foreground">
Get your API key from <a href="https://console.x.ai" target="_blank" className="underline hover:text-primary">console.x.ai</a>
</p>
</div>
<div className="relative">
<div className="absolute inset-0 flex items-center">
<div className="w-full border-t border-border"></div>
</div>
<div className="relative flex justify-center text-xs">
<span className="bg-card px-2 text-muted-foreground">or use cookies</span>
</div>
</div>
<div className="space-y-2">
<label className="text-sm font-medium text-muted-foreground">Grok Cookies (Alternative)</label>
<textarea
value={grokCookies}
onChange={(e) => setGrokCookies(e.target.value)}
placeholder="Paste cookies from grok.com..."
className="w-full h-24 p-3 rounded-lg bg-secondary/50 border border-border focus:ring-2 focus:ring-primary/50 outline-none font-mono text-xs"
/>
<p className="text-xs text-muted-foreground">
Get from logged-in <a href="https://grok.com" target="_blank" className="underline hover:text-primary">grok.com</a> session.
</p>
</div>
</div>
)}
</div>
</details>
{provider === 'meta' && (
<div className="space-y-2">
<label className="text-sm font-medium">Meta AI Cookies</label>
<div className="pt-2 border-t border-white/5">
<p className="text-sm font-medium mb-3 text-amber-400">Authentication Required</p>
{/* Meta AI Cookies */}
<div className="space-y-2 mb-4">
<label className="text-sm font-medium">Meta.ai Cookies</label>
<textarea
value={metaCookies}
onChange={(e) => setMetaCookies(e.target.value)}
@ -137,9 +145,25 @@ export function Settings() {
className="w-full h-32 p-3 rounded-lg bg-secondary/50 border border-border focus:ring-2 focus:ring-primary/50 outline-none font-mono text-xs"
/>
<p className="text-xs text-muted-foreground">
Get from logged-in <a href="https://www.meta.ai" target="_blank" className="underline hover:text-primary">meta.ai</a> session (requires Facebook login).
Get from logged-in <a href="https://www.meta.ai" target="_blank" className="underline hover:text-primary">meta.ai</a> session.
</p>
</div>
{/* Facebook Cookies */}
<div className="space-y-2">
<label className="text-sm font-medium">Facebook.com Cookies <span className="text-red-500">*</span></label>
<textarea
value={facebookCookies}
onChange={(e) => setFacebookCookies(e.target.value)}
placeholder="Paste cookies from facebook.com (REQUIRED for authentication)..."
className="w-full h-32 p-3 rounded-lg bg-secondary/50 border border-border focus:ring-2 focus:ring-primary/50 outline-none font-mono text-xs"
/>
<p className="text-xs text-muted-foreground">
<strong>Required:</strong> Meta AI authenticates via Facebook. Get from logged-in <a href="https://www.facebook.com" target="_blank" className="underline hover:text-primary">facebook.com</a> session using Cookie-Editor.
</p>
</div>
</div>
</div>
)}
</div>

View file

@ -72,7 +72,7 @@ export function VideoPromptModal({ isOpen, onClose, image, onGenerate }: VideoPr
{image && (
<>
<img
src={`data:image/png;base64,${image.data}`}
src={image.data.startsWith('data:') || image.data.startsWith('http') ? image.data : `data:image/png;base64,${image.data}`}
alt="Source"
className="w-full h-full object-cover"
/>

File diff suppressed because one or more lines are too long

View file

@ -7,20 +7,10 @@ services:
- "8558:3000"
environment:
- NODE_ENV=production
- CRAWL4AI_URL=http://crawl4ai:8000
depends_on:
- crawl4ai
crawl4ai:
build:
context: ./services/crawl4ai
dockerfile: Dockerfile
container_name: crawl4ai
metaai-free-api:
build: ./services/metaai-api
container_name: metaai-free-api
restart: unless-stopped
ports:
- "8559:8000"
environment:
- META_RATE_LIMIT_DELAY=60
- META_MAX_REQUESTS_HOUR=20
- BROWSER_HEADLESS=true
shm_size: '1gb'
- "8000:8000"

View file

@ -1,300 +1,22 @@
import { Prompt } from './types';
const JIMMYLV_SOURCE_URL = "https://raw.githubusercontent.com/JimmyLv/awesome-nano-banana/main/cases";
const YOUMIND_README_URL = "https://raw.githubusercontent.com/YouMind-OpenLab/awesome-nano-banana-pro-prompts/main/README.md";
const ZEROLU_README_URL = "https://raw.githubusercontent.com/ZeroLu/awesome-nanobanana-pro/main/README.md";
const MAX_CASE_ID = 200; // Increased limit slightly
const BATCH_SIZE = 10;
import { Prompt } from '@/lib/types';
export class JimmyLvCrawler {
async crawl(limit: number = 300): Promise<Prompt[]> {
console.log(`Starting crawl for ${limit} cases...`);
const prompts: Prompt[] = [];
// Create batches of IDs to fetch
const ids = Array.from({ length: limit }, (_, i) => i + 1);
for (let i = 0; i < ids.length; i += BATCH_SIZE) {
const batch = ids.slice(i, i + BATCH_SIZE);
// console.log(`Fetching batch ${i + 1} to ${i + batch.length}...`);
const results = await Promise.all(
batch.map(id => this.fetchCase(id))
);
results.forEach(p => {
if (p) prompts.push(p);
});
}
console.log(`[JimmyLv] Crawled ${prompts.length} valid prompts.`);
return prompts;
}
/**
 * Download and parse a single case.yml by its numeric id.
 * Returns null on any network or parse failure — non-OK responses are
 * expected once we probe past the last existing case number.
 */
private async fetchCase(id: number): Promise<Prompt | null> {
  const caseUrl = `${JIMMYLV_SOURCE_URL}/${id}/case.yml`;
  try {
    const response = await fetch(caseUrl);
    if (!response.ok) return null;
    return this.parseCase(await response.text(), id);
  } catch (error) {
    console.error(`Error fetching case ${id}:`, error);
    return null;
  }
}
/**
 * Convert raw case.yml text into a Prompt, or null when the file lacks a
 * usable prompt. Parsing is regex-based rather than a full YAML parse.
 */
private parseCase(content: string, caseId: number): Prompt | null {
  try {
    // Prefer the English title, then the default title, then "Unknown".
    const title =
      this.extract(content, /title_en:\s*(.+)/) ||
      this.extract(content, /title:\s*(.+)/) ||
      "Unknown";

    // Multi-line block-scalar prompt (prompt_en preferred), collapsed
    // into a single line.
    const blockMatch =
      content.match(/prompt_en:\s*\|\s*\n((?: .+\n)+)/) ||
      content.match(/prompt:\s*\|\s*\n((?: .+\n)+)/);
    let promptText = blockMatch
      ? blockMatch[1].split('\n').map(line => line.trim()).join(' ').trim()
      : "";
    // Fall back to a simple single-line prompt.
    if (!promptText) {
      promptText = this.extract(content, /prompt:\s*(.+)/) || "";
    }
    if (!promptText) return null;

    // Image is stored as a filename relative to the case directory.
    const imageFilename = this.extract(content, /image:\s*(.+)/);
    const imageUrl = imageFilename
      ? `${JIMMYLV_SOURCE_URL}/${caseId}/${imageFilename}`
      : "";

    const author = this.extract(content, /author:\s*"?([^"\n]+)"?/) || "JimmyLv Repo";

    return {
      id: 0, // Will be assigned by manager
      title: title.slice(0, 150),
      prompt: promptText,
      category: this.inferCategory(title, promptText),
      category_type: "style", // Simplified
      description: promptText.slice(0, 200) + (promptText.length > 200 ? "..." : ""),
      images: imageUrl ? [imageUrl] : [],
      author,
      source: "jimmylv",
      source_url: `https://github.com/JimmyLv/awesome-nano-banana/tree/main/cases/${caseId}`
    };
  } catch {
    return null;
  }
}
// Run a single-capture regex against the content; return the trimmed
// first capture group, or null when the pattern does not match.
private extract(content: string, regex: RegExp): string | null {
  const found = content.match(regex);
  if (!found) return null;
  return found[1].trim();
}
private inferCategory(title: string, prompt: string): string {
const text = (title + " " + prompt).toLowerCase();
const rules: [string[], string][] = [
[["ghibli", "anime", "cartoon", "chibi", "comic", "illustration", "drawing"], "Illustration"],
[["icon", "logo", "symbol"], "Logo / Icon"],
[["product", "packaging", "mockup"], "Product"],
[["avatar", "profile", "headshot"], "Profile / Avatar"],
[["infographic", "chart", "diagram"], "Infographic / Edu Visual"],
[["cinematic", "film", "movie"], "Cinematic / Film Still"],
[["3d", "render", "blender"], "3D Render"],
[["pixel", "8-bit", "retro game"], "Pixel Art"],
];
for (const [keywords, cat] of rules) {
if (keywords.some(k => text.includes(k))) return cat;
}
return "Photography";
async crawl(): Promise<Prompt[]> {
console.log("[JimmyLvCrawler] Crawling not implemented");
return [];
}
}
export class YouMindCrawler {
/**
 * Fetch the YouMind README and parse every "### No." section into a
 * Prompt. Network failures are logged and yield an empty result.
 */
async crawl(): Promise<Prompt[]> {
  console.log(`[YouMind] Starting crawl of README...`);
  let collected: Prompt[] = [];
  try {
    const response = await fetch(YOUMIND_README_URL);
    if (!response.ok) throw new Error("Failed to fetch YouMind README");
    const markdown = await response.text();
    // Each case lives in its own "### No.<n>" section; slice(1) drops
    // the preamble before the first section.
    const sections = markdown.split(/### No\./g).slice(1);
    collected = sections
      .map((section, i) => this.parseSection(section, i + 1))
      .filter((p): p is Prompt => p !== null);
  } catch (e) {
    console.error("[YouMind] Crawl failed", e);
  }
  console.log(`[YouMind] Crawled ${collected.length} valid prompts.`);
  return collected;
}
/**
 * Parse one "### No." README section into a Prompt.
 * Returns null when no prompt code block can be located.
 */
private parseSection(content: string, index: number): Prompt | null {
  try {
    // Heading line looks like "<n>: <title>".
    const titleHit = content.match(/\s*\d+:\s*(.+)/);
    const title = titleHit ? titleHit[1].trim() : `YouMind Case ${index}`;

    // Prefer the fenced block under "#### 📝 Prompt"; otherwise fall back
    // to the first fenced code block anywhere in the section.
    const strictHit = content.match(/#### 📝 Prompt\s+```[\s\S]*?\n([\s\S]*?)```/);
    const looseHit = content.match(/```\s*([\s\S]*?)\s*```/);
    const promptText = strictHit
      ? strictHit[1].trim()
      : looseHit
        ? looseHit[1].trim()
        : "";
    if (!promptText) return null;

    // Collect inline <img> tags, skipping shields.io badges.
    const images = [...content.matchAll(/<img src="(.*?)"/g)]
      .map(m => m[1])
      .filter(url => !url.includes("img.shields.io"));

    const authorHit = content.match(/- \*\*Author:\*\* \[(.*?)\]/);
    const sourceHit = content.match(/- \*\*Source:\*\* \[(.*?)\]\((.*?)\)/);

    return {
      id: 0,
      title,
      prompt: promptText,
      category: this.inferCategory(title, promptText),
      category_type: "style",
      description: title,
      images,
      author: authorHit ? authorHit[1] : "YouMind Community",
      source: "youmind",
      source_url: sourceHit
        ? sourceHit[2]
        : `https://github.com/YouMind-OpenLab/awesome-nano-banana-pro-prompts#no-${index}`
    };
  } catch {
    return null;
  }
}
private inferCategory(title: string, prompt: string): string {
// Reuse similar logic, maybe static util later
const text = (title + " " + prompt).toLowerCase();
if (text.includes("logo") || text.includes("icon")) return "Logo / Icon";
if (text.includes("3d")) return "3D Render";
if (text.includes("photo") || text.includes("realistic")) return "Photography";
return "Illustration";
console.log("[YouMindCrawler] Crawling not implemented");
return [];
}
}
export class ZeroLuCrawler {
async crawl(): Promise<Prompt[]> {
console.log(`[ZeroLu] Starting crawl of README...`);
const prompts: Prompt[] = [];
try {
const res = await fetch(ZEROLU_README_URL);
if (!res.ok) throw new Error("Failed to fetch ZeroLu README");
const text = await res.text();
// Split by H3 headers like "### 1.1 " or "### 1.2 "
// The format is `### X.X. Title`
const sections = text.split(/### \d+\.\d+\.?\s+/).slice(1);
// We need to capture the title which was consumed by split, or use matchAll
// Better to use regex global match to find headers and their content positions.
// Or just split and accept title is lost? No, title is important.
// Alternative loop:
const regex = /### (\d+\.\d+\.?\s+.*?)\n([\s\S]*?)(?=### \d+\.\d+|$)/g;
let match;
let count = 0;
while ((match = regex.exec(text)) !== null) {
const title = match[1].trim();
const body = match[2];
const prompt = this.parseSection(title, body);
if (prompt) prompts.push(prompt);
count++;
}
} catch (e) {
console.error("[ZeroLu] Crawl failed", e);
}
console.log(`[ZeroLu] Crawled ${prompts.length} valid prompts.`);
return prompts;
}
/**
 * Parse a single ZeroLu README section (title + body) into a Prompt.
 * Returns null when the body has no "**Prompt:**" code block.
 */
private parseSection(title: string, content: string): Prompt | null {
  // The prompt lives in a fenced code block right after "**Prompt:**".
  const promptHit = content.match(/\*\*Prompt:\*\*\s*[\n\r]*```[\w]*([\s\S]*?)```/);
  if (!promptHit) return null;
  const promptText = promptHit[1].trim();

  // First image, whether markdown (![alt](url)) or raw HTML (<img src>).
  const mdImage = content.match(/!\[.*?\]\((.*?)\)/);
  const htmlImage = content.match(/<img.*?src="(.*?)".*?>/);
  const imageUrl = mdImage ? mdImage[1] : htmlImage ? htmlImage[1] : "";

  // "Source: [@author](url)" attribution line, when present.
  const sourceHit = content.match(/Source: \[@(.*?)\]\((.*?)\)/);
  const sourceUrl = sourceHit
    ? sourceHit[2]
    : `https://github.com/ZeroLu/awesome-nanobanana-pro#${title.toLowerCase().replace(/\s+/g, '-')}`;

  return {
    id: 0,
    title,
    prompt: promptText,
    category: this.inferCategory(title, promptText),
    category_type: "style",
    description: title,
    images: imageUrl ? [imageUrl] : [],
    author: sourceHit ? sourceHit[1] : "ZeroLu Community",
    source: "zerolu",
    source_url: sourceUrl
  };
}
private inferCategory(title: string, prompt: string): string {
const text = (title + " " + prompt).toLowerCase();
if (text.includes("logo") || text.includes("icon")) return "Logo / Icon";
if (text.includes("3d")) return "3D Render";
if (text.includes("photo") || text.includes("realistic") || text.includes("selfie")) return "Photography";
return "Illustration";
console.log("[ZeroLuCrawler] Crawling not implemented");
return [];
}
}

View file

@ -1,246 +0,0 @@
/**
* Grok/xAI Client for Image Generation
*
* Supports two authentication methods:
* 1. Official API Key from console.x.ai (recommended)
* 2. Cookie-based auth from logged-in grok.com session
*
* Image Model: FLUX.1 by Black Forest Labs
*/
// Official xAI API endpoint
const XAI_API_BASE = "https://api.x.ai/v1";
// Grok web interface endpoint (for cookie-based auth)
const GROK_WEB_BASE = "https://grok.com";
// Inputs for a Grok image-generation request. Either apiKey (official
// console.x.ai key) or cookies (logged-in grok.com session) must be set.
interface GrokGenerateOptions {
prompt: string;
apiKey?: string;   // official xAI API key
cookies?: string;  // raw header string or JSON array export from the browser
numImages?: number;
}
// A single generated image, as returned by either auth flow.
interface GrokImageResult {
url: string;       // direct URL, or a data: URI when only base64 is available
data?: string; // base64
prompt: string;    // the prompt that produced this image
model: string;     // e.g. "grok-2-image" (API flow) or "grok-3" (web flow)
}
export class GrokClient {
private apiKey?: string;
private cookies?: string;
constructor(options: { apiKey?: string; cookies?: string }) {
  const { apiKey, cookies } = options;
  this.apiKey = apiKey;
  // Cookies may arrive as a JSON array export; normalize to a header string.
  this.cookies = this.normalizeCookies(cookies);
}
/**
 * Accept either a raw "name=value; ..." cookie header or a JSON array
 * exported from an extension/devtools, and return a header-style string.
 * Returns undefined when no cookies were supplied.
 */
private normalizeCookies(cookies?: string): string | undefined {
  if (!cookies) return undefined;
  // A JSON export starts with '['; anything else passes through untouched.
  if (cookies.trim().startsWith('[')) {
    try {
      const parsed = JSON.parse(cookies);
      if (Array.isArray(parsed)) {
        const pairs = parsed.map((c: any) => `${c.name}=${c.value}`);
        return pairs.join('; ');
      }
    } catch {
      // Looked like JSON but was not — fall through to the raw string.
    }
  }
  return cookies;
}
/**
* Generate images using Grok/xAI
* Prefers official API if apiKey is provided, falls back to cookie-based
*/
async generate(prompt: string, numImages: number = 1): Promise<GrokImageResult[]> {
if (this.apiKey) {
return this.generateWithAPI(prompt, numImages);
} else if (this.cookies) {
return this.generateWithCookies(prompt, numImages);
} else {
throw new Error("Grok: No API key or cookies provided. Configure in Settings.");
}
}
/**
 * Generate via the official xAI REST API (key from console.x.ai).
 * Uses the OpenAI-compatible /images/generations endpoint.
 * @throws Error on a non-OK response or when no images come back.
 */
private async generateWithAPI(prompt: string, numImages: number): Promise<GrokImageResult[]> {
  console.log(`[Grok API] Generating ${numImages} image(s) for: "${prompt.substring(0, 50)}..."`);
  const payload = {
    model: "grok-2-image",
    prompt: prompt,
    n: numImages,
    response_format: "url" // or "b64_json"
  };
  const response = await fetch(`${XAI_API_BASE}/images/generations`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${this.apiKey}`
    },
    body: JSON.stringify(payload)
  });
  if (!response.ok) {
    const errorText = await response.text();
    console.error("[Grok API] Error:", response.status, errorText);
    throw new Error(`Grok API Error: ${response.status} - ${errorText.substring(0, 200)}`);
  }
  const data = await response.json();
  console.log("[Grok API] Response:", JSON.stringify(data, null, 2));
  // xAI mirrors the OpenAI shape: { data: [{ url | b64_json }, ...] }.
  const images: GrokImageResult[] = [];
  for (const img of data.data || []) {
    images.push({
      url: img.url || (img.b64_json ? `data:image/png;base64,${img.b64_json}` : ''),
      data: img.b64_json,
      prompt: prompt,
      model: "grok-2-image"
    });
  }
  if (images.length === 0) {
    throw new Error("Grok API returned no images");
  }
  return images;
}
/**
 * Generate using the Grok web interface (cookie-based).
 * Requires cookies from a logged-in grok.com session.
 *
 * NOTE(review): this drives a private app-chat endpoint, so the request
 * body below mirrors what the web client sends — do not rename or reorder
 * fields without re-capturing real traffic. `numImages` is currently
 * unused in this flow: the response contains however many images Grok
 * chose to generate.
 */
private async generateWithCookies(prompt: string, numImages: number): Promise<GrokImageResult[]> {
  console.log(`[Grok Web] Generating image for: "${prompt.substring(0, 50)}..."`);
  // The Grok web interface uses a chat-based API.
  // We need to send a message asking for image generation.
  const imagePrompt = `Generate an image: ${prompt}`;
  const response = await fetch(`${GROK_WEB_BASE}/rest/app-chat/conversations/new`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Cookie": this.cookies!,
      // Origin/Referer/UA/Sec-Fetch-* imitate a same-origin browser request.
      "Origin": GROK_WEB_BASE,
      "Referer": `${GROK_WEB_BASE}/`,
      "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
      "Sec-Fetch-Site": "same-origin",
      "Sec-Fetch-Mode": "cors",
      "Sec-Fetch-Dest": "empty"
    },
    body: JSON.stringify({
      temporary: false,
      modelName: "grok-3",
      message: imagePrompt,
      fileAttachments: [],
      imageAttachments: [],
      disableSearch: false,
      // The key flag: ask the chat backend to produce images.
      enableImageGeneration: true,
      returnImageBytes: false,
      returnRawGrokInXaiRequest: false,
      sendFinalMetadata: true,
      customInstructions: "",
      deepsearchPreset: "",
      isReasoning: false
    })
  });
  if (!response.ok) {
    const errorText = await response.text();
    console.error("[Grok Web] Error:", response.status, errorText);
    throw new Error(`Grok Web Error: ${response.status} - ${errorText.substring(0, 200)}`);
  }
  // Parse streaming response to find image URLs.
  const text = await response.text();
  console.log("[Grok Web] Response length:", text.length);
  // Look for generated image URLs in the response.
  const imageUrls = this.extractImageUrls(text);
  if (imageUrls.length === 0) {
    console.warn("[Grok Web] No image URLs found in response. Response preview:", text.substring(0, 500));
    throw new Error("Grok did not generate any images. Try a different prompt or check your cookies.");
  }
  return imageUrls.map(url => ({
    url,
    prompt,
    model: "grok-3"
  }));
}
/**
* Extract image URLs from Grok's streaming response
*/
private extractImageUrls(responseText: string): string[] {
  const found: string[] = [];

  // Grok streams NDJSON: one JSON object per line. Walk every non-empty line
  // and pull URLs from each of the known payload shapes.
  for (const rawLine of responseText.split('\n')) {
    if (!rawLine.trim()) continue;
    try {
      const event = JSON.parse(rawLine);
      // Shape 1: top-level generatedImageUrls array.
      if (event.generatedImageUrls && Array.isArray(event.generatedImageUrls)) {
        found.push(...event.generatedImageUrls);
      }
      // Shape 2: imageUrls nested under result.
      if (event.result?.imageUrls) {
        found.push(...event.result.imageUrls);
      }
      // Shape 3: media attachments with type "image".
      if (event.attachments) {
        for (const attachment of event.attachments) {
          if (attachment.type === 'image' && attachment.url) {
            found.push(attachment.url);
          }
        }
      }
    } catch {
      // Not JSON (or malformed) — the regex fallback below covers these lines.
    }
  }

  // Fallback: scrape anything that looks like an image URL from the raw text.
  if (found.length === 0) {
    const urlRegex = /https:\/\/[^"\s]+\.(png|jpg|jpeg|webp)/gi;
    const matches = responseText.match(urlRegex);
    if (matches) {
      found.push(...matches);
    }
  }

  // Deduplicate while preserving first-seen order.
  return Array.from(new Set(found));
}
/**
* Download image from URL and convert to base64
*/
async downloadAsBase64(url: string): Promise<string> {
const response = await fetch(url);
const buffer = await response.arrayBuffer();
const base64 = Buffer.from(buffer).toString('base64');
return base64;
}
}

View file

@ -27,16 +27,32 @@ interface MetaSession {
lsd?: string;
fb_dtsg?: string;
accessToken?: string;
externalConversationId?: string;
}
// Aspect ratio types for Meta AI
export type AspectRatio = 'portrait' | 'landscape' | 'square';
const ORIENTATION_MAP: Record<AspectRatio, string> = {
'portrait': 'VERTICAL', // 9:16
'landscape': 'HORIZONTAL', // 16:9
'square': 'SQUARE' // 1:1
};
export class MetaAIClient {
private cookies: string;
private session: MetaSession = {};
private useFreeWrapper: boolean = true;
private freeWrapperUrl: string = 'http://localhost:8000';
constructor(options: MetaAIOptions) {
constructor(options: MetaAIOptions & { useFreeWrapper?: boolean; freeWrapperUrl?: string }) {
this.cookies = this.normalizeCookies(options.cookies);
this.useFreeWrapper = options.useFreeWrapper !== undefined ? options.useFreeWrapper : true;
this.freeWrapperUrl = options.freeWrapperUrl || 'http://localhost:8000';
console.log("[Meta AI] Cookie string length:", this.cookies.length);
if (this.cookies) {
this.parseSessionFromCookies();
}
}
/**
* Normalize cookies from string or JSON format
@ -79,11 +95,32 @@ export class MetaAIClient {
}
}
/**
* Get the initialized session tokens (call initSession first if not done)
*/
async getSession(): Promise<MetaSession & { externalConversationId?: string }> {
if (!this.useFreeWrapper && !this.session.lsd && !this.session.fb_dtsg) {
await this.initSession();
}
return this.session;
}
/**
* Get the normalized cookie string
*/
getCookies(): string {
return this.cookies;
}
/**
* Generate images using Meta AI's Imagine model
*/
async generate(prompt: string, numImages: number = 4): Promise<MetaImageResult[]> {
console.log(`[Meta AI] Generating images for: "${prompt.substring(0, 50)}..."`);
async generate(prompt: string, numImages: number = 4, aspectRatio: AspectRatio = 'portrait'): Promise<MetaImageResult[]> {
console.log(`[Meta AI] Generating images for: "${prompt.substring(0, 50)}..." (${aspectRatio})`);
if (this.useFreeWrapper) {
return this.generateWithFreeWrapper(prompt, numImages);
}
// First, get the access token and session info if not already fetched
if (!this.session.accessToken) {
@ -95,8 +132,8 @@ export class MetaAIClient {
? prompt
: `Imagine ${prompt}`;
// Send the prompt via GraphQL
const response = await this.sendPrompt(imagePrompt);
// Send the prompt via GraphQL with aspect ratio
const response = await this.sendPrompt(imagePrompt, aspectRatio);
// Extract image URLs from response
const images = this.extractImages(response, prompt);
@ -111,6 +148,92 @@ export class MetaAIClient {
return images;
}
/**
 * Generate using free API wrapper (mir-ashiq/metaai-api)
 * Connects to local docker service
 */
private async generateWithFreeWrapper(prompt: string, numImages: number): Promise<MetaImageResult[]> {
  console.log(`[Meta Wrapper] Generating image for: "${prompt.substring(0, 50)}..." via ${this.freeWrapperUrl}`);

  // The wrapper's /chat endpoint expects cookies as a name->value object.
  const response = await fetch(`${this.freeWrapperUrl}/chat`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json"
    },
    body: JSON.stringify({
      message: `Imagine ${prompt}`,
      stream: false,
      cookies: this.parseCookiesToDict(this.cookies)
    })
  });

  if (!response.ok) {
    const errorText = await response.text();
    console.error("[Meta Wrapper] Error:", response.status, errorText);
    throw new Error(`Meta Wrapper Error: ${response.status} - ${errorText.substring(0, 200)}`);
  }

  const data = await response.json();
  console.log("[Meta Wrapper] Response:", JSON.stringify(data, null, 2).substring(0, 500));

  const images: MetaImageResult[] = [];

  // Primary shape (mir-ashiq/metaai-api): { media: [{ url, ... }] }
  if (data.media && Array.isArray(data.media)) {
    for (const entry of data.media) {
      if (entry.url) images.push({ url: entry.url, prompt, model: "meta-wrapper" });
    }
  }
  // Fallback shape: { images: ["url", ...] }
  if (images.length === 0 && data.images && Array.isArray(data.images)) {
    for (const url of data.images) {
      images.push({ url, prompt, model: "meta-wrapper" });
    }
  }
  // Last resort: scan cited sources for direct image links.
  if (images.length === 0 && data.sources && Array.isArray(data.sources)) {
    for (const source of data.sources) {
      if (source.url && (source.url.includes('.jpg') || source.url.includes('.png') || source.url.includes('.webp'))) {
        images.push({ url: source.url, prompt, model: "meta-wrapper-source" });
      }
    }
  }

  if (images.length === 0) {
    console.warn("[Meta Wrapper] No images found via /chat endpoint", data);
    throw new Error("Meta Wrapper returned no images. Please check if the prompt triggered image generation.");
  }
  return images;
}
private parseCookiesToDict(cookieStr: string): Record<string, string> {
  // Parse a "name=value; name2=value2" cookie header into a plain object.
  const cookies: Record<string, string> = {};
  if (!cookieStr) return cookies;
  for (const pair of cookieStr.split(';')) {
    const [name, ...valueParts] = pair.trim().split('=');
    // Cookie values may themselves contain '=' (e.g. base64), so rejoin the tail.
    if (name && valueParts.length > 0) {
      cookies[name] = valueParts.join('=');
    }
  }
  return cookies;
}
private async generateWithFreeWrapperFallback(prompt: string): Promise<MetaImageResult[]> {
// Fallback logic not needed if /chat works
throw new Error("Meta Wrapper endpoint /chat failed.");
}
/**
* Initialize session - get access token from meta.ai page
*/
@ -166,28 +289,63 @@ export class MetaAIClient {
this.session.fb_dtsg = dtsgMatch[1];
}
// We no longer strictly enforce accessToken presence here
// as some requests might work with just cookies
// Enhanced logging for debugging
console.log("[Meta AI] Session tokens extracted:", {
hasAccessToken: !!this.session.accessToken,
hasLsd: !!this.session.lsd,
hasDtsg: !!this.session.fb_dtsg
});
if (!this.session.accessToken && !this.session.lsd) {
console.warn("[Meta AI] CRITICAL: No authentication tokens found. Check if cookies are valid.");
}
}
/**
* Send prompt via GraphQL mutation
*/
private async sendPrompt(prompt: string): Promise<any> {
/**
* Send prompt via GraphQL mutation (Abra - for Image Generation)
* Using EXACT variable structure from Python Strvm/meta-ai-api library
*/
private async sendPrompt(prompt: string, aspectRatio: AspectRatio = 'portrait'): Promise<any> {
// Generate external conversation ID (UUID) and offline threading ID (Snowflake-like)
const externalConversationId = crypto.randomUUID();
const timestamp = Date.now();
const randomPart = Math.floor(Math.random() * 4194304); // 22 bits
const offlineThreadingId = ((BigInt(timestamp) << 22n) | BigInt(randomPart)).toString();
// Store for polling
this.session.externalConversationId = externalConversationId;
// Map aspect ratio to Meta AI orientation
const orientation = ORIENTATION_MAP[aspectRatio];
const variables = {
message: {
text: prompt,
content_type: "TEXT"
sensitive_string_value: prompt // Python uses sensitive_string_value, not text!
},
source: "PDT_CHAT_INPUT",
external_message_id: Math.random().toString(36).substring(2) + Date.now().toString(36)
externalConversationId: externalConversationId,
offlineThreadingId: offlineThreadingId,
suggestedPromptIndex: null,
flashVideoRecapInput: { images: [] },
flashPreviewInput: null,
promptPrefix: null,
entrypoint: "ABRA__CHAT__TEXT",
icebreaker_type: "TEXT",
imagineClientOptions: { orientation: orientation }, // Aspect ratio control
__relay_internal__pv__AbraDebugDevOnlyrelayprovider: false,
__relay_internal__pv__WebPixelRatiorelayprovider: 1
};
console.log("[Meta AI] Sending Variables:", JSON.stringify(variables, null, 2));
const body = new URLSearchParams({
fb_api_caller_class: "RelayModern",
fb_api_req_friendly_name: "useAbraSendMessageMutation",
variables: JSON.stringify(variables),
doc_id: "7783822248314888",
server_timestamps: "true",
doc_id: "7783822248314888", // Abra Mutation ID
...(this.session.lsd && { lsd: this.session.lsd }),
...(this.session.fb_dtsg && { fb_dtsg: this.session.fb_dtsg })
});
@ -209,25 +367,56 @@ export class MetaAIClient {
body: body.toString()
});
const rawText = await response.text();
console.log("[Meta AI] Response received, parsing streaming data...");
if (!response.ok) {
const errorText = await response.text();
console.error("[Meta AI] GraphQL Error:", response.status, errorText);
throw new Error(`Meta AI Error: ${response.status} - ${errorText.substring(0, 200)}`);
throw new Error(`Meta AI Error: ${response.status} - ${rawText.substring(0, 500)}`);
}
// Check if response is actually JSON
const contentType = response.headers.get("content-type");
if (contentType && contentType.includes("text/html")) {
const text = await response.text();
if (text.includes("login_form") || text.includes("facebook.com/login")) {
// Meta AI returns streaming response (multiple JSON lines)
// We need to find the final response where streaming_state === "OVERALL_DONE"
let lastValidResponse: any = null;
const lines = rawText.split('\n');
for (const line of lines) {
if (!line.trim()) continue;
try {
const parsed = JSON.parse(line);
// Check for streaming state in both direct and nested paths
const streamingState =
parsed?.data?.xfb_abra_send_message?.bot_response_message?.streaming_state ||
parsed?.data?.node?.bot_response_message?.streaming_state;
if (streamingState === "OVERALL_DONE") {
console.log("[Meta AI] Found OVERALL_DONE response");
lastValidResponse = parsed;
break;
}
// Keep track of any valid response with imagine_card
const imagineCard =
parsed?.data?.xfb_abra_send_message?.bot_response_message?.imagine_card ||
parsed?.data?.node?.bot_response_message?.imagine_card;
if (imagineCard?.session?.media_sets) {
lastValidResponse = parsed;
}
} catch (e) {
// Skip non-JSON lines
continue;
}
}
if (!lastValidResponse) {
if (rawText.includes("login_form") || rawText.includes("facebook.com/login")) {
throw new Error("Meta AI: Session expired. Please refresh your cookies.");
}
throw new Error(`Meta AI returned HTML error: ${text.substring(0, 100)}...`);
throw new Error("Meta AI: No valid response found in streaming data");
}
const data = await response.json();
console.log("[Meta AI] Response:", JSON.stringify(data, null, 2).substring(0, 500));
return data;
console.log("[Meta AI] Successfully parsed streaming response");
return lastValidResponse;
}
/**
@ -237,24 +426,88 @@ export class MetaAIClient {
const images: MetaImageResult[] = [];
// Navigate through the response structure
const messageData = response?.data?.node?.bot_response_message ||
// Abra streaming: xfb_abra_send_message.bot_response_message
// Abra direct: node.bot_response_message
// Kadabra: useKadabraSendMessageMutation.node.bot_response_message
const messageData = response?.data?.xfb_abra_send_message?.bot_response_message ||
response?.data?.useKadabraSendMessageMutation?.node?.bot_response_message ||
response?.data?.node?.bot_response_message ||
response?.data?.xabraAIPreviewMessageSendMutation?.message;
if (!messageData) {
// For Polling/KadabraPromptRootQuery which also contains imagine_cards
const pollMessages = response?.data?.kadabra_prompt?.messages?.edges || [];
if (pollMessages.length > 0) {
for (const edge of pollMessages) {
const nodeImages = this.extractImagesFromMessage(edge?.node, originalPrompt);
images.push(...nodeImages);
}
}
if (messageData) {
images.push(...this.extractImagesFromMessage(messageData, originalPrompt));
}
// --- STRENGTHENED FALLBACK ---
// If still no images, but we have data, do a recursive search for any CDN URLs
if (images.length === 0 && response?.data) {
console.log("[Meta AI] Structured extraction failed, attempting recursive search...");
const foundUrls = this.recursiveSearchForImages(response.data);
for (const url of foundUrls) {
images.push({
url: url,
prompt: originalPrompt,
model: "meta"
});
}
}
if (images.length === 0) {
console.log("[Meta AI] Extraction failed. Response keys:", Object.keys(response || {}));
if (response?.data) console.log("[Meta AI] Data keys:", Object.keys(response.data));
}
return images;
}
/**
* Recursive search for image-like URLs in the JSON tree
*/
private recursiveSearchForImages(obj: any, found: Set<string> = new Set()): string[] {
  if (!obj || typeof obj !== 'object') return [];

  // Heuristic: a Meta CDN / meta.ai URL that carries an image extension
  // or an image-ish path marker.
  const looksLikeImageUrl = (s: string): boolean =>
    (s.includes('fbcdn.net') || s.includes('meta.ai')) &&
    (s.includes('.jpg') || s.includes('.png') || s.includes('.webp') ||
     s.includes('image_uri=') || s.includes('/imagine/'));

  for (const value of Object.values(obj)) {
    if (typeof value === 'string') {
      if (looksLikeImageUrl(value)) found.add(value);
    } else if (typeof value === 'object') {
      // Arrays are objects too, so this descends into both; the guard at the
      // top makes a null value a no-op.
      this.recursiveSearchForImages(value, found);
    }
  }
  return Array.from(found);
}
/**
* Helper to extract images from a single message node
*/
private extractImagesFromMessage(messageData: any, originalPrompt: string): MetaImageResult[] {
const images: MetaImageResult[] = [];
if (!messageData) return images;
// Check for imagine_card (image generation response)
const imagineCard = messageData?.imagine_card;
if (imagineCard?.session?.media_sets) {
for (const mediaSet of imagineCard.session.media_sets) {
if (mediaSet?.imagine_media) {
for (const media of mediaSet.imagine_media) {
if (media?.uri) {
const url = media?.uri || media?.image_uri || media?.image?.uri;
if (url) {
images.push({
url: media.uri,
url: url,
prompt: originalPrompt,
model: "imagine"
model: "meta"
});
}
}
@ -262,15 +515,17 @@ export class MetaAIClient {
}
}
// Check for attachments
// Check for attachments (alternative path)
const attachments = messageData?.attachments;
if (attachments) {
for (const attachment of attachments) {
if (attachment?.media?.image_uri) {
const media = attachment?.media;
const url = media?.image_uri || media?.uri || media?.image?.uri;
if (url) {
images.push({
url: attachment.media.image_uri,
url: url,
prompt: originalPrompt,
model: "imagine"
model: "meta"
});
}
}
@ -283,79 +538,72 @@ export class MetaAIClient {
* Poll for image generation completion
*/
private async pollForImages(initialResponse: any, prompt: string): Promise<MetaImageResult[]> {
const maxAttempts = 30;
const pollInterval = 2000;
// Kadabra uses external_conversation_id for polling
const conversationId = initialResponse?.data?.useKadabraSendMessageMutation?.node?.external_conversation_id ||
initialResponse?.data?.node?.external_conversation_id;
// Get the fetch_id from initial response for polling
const fetchId = initialResponse?.data?.node?.id ||
initialResponse?.data?.xabraAIPreviewMessageSendMutation?.message?.id;
if (!fetchId) {
console.warn("[Meta AI] No fetch ID for polling, returning empty");
if (!conversationId) {
console.warn("[Meta AI] No conversation ID found for polling");
return [];
}
const maxAttempts = 30;
const pollInterval = 2000;
for (let attempt = 0; attempt < maxAttempts; attempt++) {
console.log(`[Meta AI] Polling attempt ${attempt + 1}/${maxAttempts}...`);
await new Promise(resolve => setTimeout(resolve, pollInterval));
try {
// Query for the message status
const statusResponse = await this.queryMessageStatus(fetchId);
const images = this.extractImages(statusResponse, prompt);
if (images.length > 0) {
console.log(`[Meta AI] Got ${images.length} images!`);
return images;
}
// Check if generation failed
const status = statusResponse?.data?.node?.imagine_card?.session?.status;
if (status === "FAILED" || status === "ERROR") {
throw new Error("Meta AI image generation failed");
}
} catch (e) {
console.error("[Meta AI] Poll error:", e);
if (attempt === maxAttempts - 1) throw e;
}
}
throw new Error("Meta AI: Image generation timed out");
}
/**
* Query message status for polling
*/
private async queryMessageStatus(messageId: string): Promise<any> {
const variables = {
id: messageId
external_conversation_id: conversationId
};
const body = new URLSearchParams({
fb_api_caller_class: "RelayModern",
fb_api_req_friendly_name: "useAbraMessageQuery",
fb_api_req_friendly_name: "KadabraPromptRootQuery",
variables: JSON.stringify(variables),
doc_id: "7654946557897648",
doc_id: "25290569913909283", // KadabraPromptRootQuery ID
...(this.session.lsd && { lsd: this.session.lsd }),
...(this.session.fb_dtsg && { fb_dtsg: this.session.fb_dtsg })
});
try {
const response = await fetch(GRAPHQL_ENDPOINT, {
method: "POST",
headers: {
"Content-Type": "application/x-www-form-urlencoded",
"Cookie": this.cookies,
"Origin": META_AI_BASE,
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
...(this.session.accessToken && { "Authorization": `OAuth ${this.session.accessToken}` })
},
body: body.toString()
});
return response.json();
const data = await response.json();
const images = this.extractImages(data, prompt);
if (images.length > 0) {
console.log(`[Meta AI] Got ${images.length} image(s) after polling!`);
return images;
}
// Check for failure status
const status = data?.data?.kadabra_prompt?.status;
if (status === "FAILED" || status === "ERROR") {
console.error("[Meta AI] Generation failed during polling");
break;
}
} catch (e: any) {
console.error("[Meta AI] Poll error:", e.message);
}
}
return [];
}
/**
* Download image from URL and convert to base64
*/

View file

@ -1,227 +0,0 @@
/**
* Meta AI Crawl4AI Client
*
* TypeScript client for the Python Crawl4AI microservice
* that handles Meta AI image generation with bot detection bypass.
*/
const CRAWL4AI_URL = process.env.CRAWL4AI_URL || 'http://localhost:8000';
/** A single generated image returned by the crawl service. */
export interface MetaCrawlImage {
  url: string; // CDN URL of the generated image
  data?: string; // base64
  prompt: string; // prompt that produced this image
  model: string; // model identifier reported by the service
}
/** Envelope returned by /generate and /generate/sync. */
export interface MetaCrawlResponse {
  success: boolean;
  images: MetaCrawlImage[]; // empty until generation completes
  error?: string; // populated when success is false
  task_id?: string; // present for async /generate requests
}
/** Status payload returned by /status/{task_id} for async generations. */
export interface TaskStatusResponse {
  task_id: string;
  status: 'pending' | 'processing' | 'completed' | 'failed';
  images: MetaCrawlImage[]; // populated once status is 'completed'
  error?: string; // populated when status is 'failed'
  progress?: number;
}
/**
 * HTTP client for the Python Crawl4AI microservice. All methods are thin
 * wrappers over the service's REST endpoints; no state beyond the base URL.
 */
export class MetaCrawlClient {
  private baseUrl: string;

  constructor(baseUrl?: string) {
    // Falls back to CRAWL4AI_URL env default when no override is given.
    this.baseUrl = baseUrl || CRAWL4AI_URL;
  }

  /**
   * Generate images synchronously (waits for completion).
   * @throws on non-2xx responses or when the service reports failure.
   */
  async generate(
    prompt: string,
    cookies: string,
    numImages: number = 4
  ): Promise<MetaCrawlImage[]> {
    console.log(`[MetaCrawl] Sending request to ${this.baseUrl}/generate/sync`);
    const response = await fetch(`${this.baseUrl}/generate/sync`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        prompt,
        cookies,
        num_images: numImages
      })
    });
    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(`MetaCrawl service error: ${response.status} - ${errorText}`);
    }
    const data: MetaCrawlResponse = await response.json();
    if (!data.success) {
      throw new Error(data.error || 'Image generation failed');
    }
    return data.images;
  }

  /**
   * Start async image generation (returns immediately with task_id).
   * Pair with getTaskStatus / waitForCompletion to collect results.
   */
  async generateAsync(
    prompt: string,
    cookies: string,
    numImages: number = 4
  ): Promise<string> {
    const response = await fetch(`${this.baseUrl}/generate`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        prompt,
        cookies,
        num_images: numImages
      })
    });
    if (!response.ok) {
      throw new Error(`MetaCrawl service error: ${response.status}`);
    }
    const data: MetaCrawlResponse = await response.json();
    if (!data.task_id) {
      throw new Error('No task_id returned from async generation');
    }
    return data.task_id;
  }

  /**
   * Get status of an async generation task.
   * @throws 'Task not found' on 404, generic service error otherwise.
   */
  async getTaskStatus(taskId: string): Promise<TaskStatusResponse> {
    const response = await fetch(`${this.baseUrl}/status/${taskId}`);
    if (!response.ok) {
      if (response.status === 404) {
        throw new Error('Task not found');
      }
      throw new Error(`MetaCrawl service error: ${response.status}`);
    }
    return response.json();
  }

  /**
   * Poll for async task completion.
   * @throws on task failure or when timeoutMs elapses without completion.
   */
  async waitForCompletion(
    taskId: string,
    pollIntervalMs: number = 2000,
    timeoutMs: number = 120000
  ): Promise<MetaCrawlImage[]> {
    const startTime = Date.now();
    while (Date.now() - startTime < timeoutMs) {
      const status = await this.getTaskStatus(taskId);
      if (status.status === 'completed') {
        return status.images;
      }
      if (status.status === 'failed') {
        throw new Error(status.error || 'Generation failed');
      }
      await new Promise(resolve => setTimeout(resolve, pollIntervalMs));
    }
    throw new Error('Task timed out');
  }

  /**
   * Check if the Crawl4AI service is healthy. Never throws; a network
   * error is reported as unhealthy.
   */
  async healthCheck(): Promise<boolean> {
    try {
      const response = await fetch(`${this.baseUrl}/health`);
      return response.ok;
    } catch {
      return false;
    }
  }

  /**
   * Get current rate limit status.
   */
  async getRateLimitStatus(): Promise<{
    requests_this_hour: number;
    max_per_hour: number;
    delay_seconds: number;
  }> {
    const response = await fetch(`${this.baseUrl}/rate-limit`);
    // FIX: previously an error response body was parsed as JSON, surfacing
    // as a confusing SyntaxError instead of a meaningful service error.
    if (!response.ok) {
      throw new Error(`MetaCrawl service error: ${response.status}`);
    }
    return response.json();
  }

  /**
   * Generate video from text prompt (and optionally an image) using Meta AI.
   * - Text-to-Video: just provide prompt and cookies.
   * - Image-to-Video: also provide imageBase64.
   * Video generation takes longer than image generation (30-60+ seconds).
   */
  async generateVideo(
    prompt: string,
    cookies: string,
    imageBase64?: string
  ): Promise<MetaCrawlVideoResponse> {
    const mode = imageBase64 ? 'image-to-video' : 'text-to-video';
    console.log(`[MetaCrawl] Sending ${mode} request to ${this.baseUrl}/video/generate`);
    const response = await fetch(`${this.baseUrl}/video/generate`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        prompt,
        cookies,
        image_base64: imageBase64
      })
    });
    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(`MetaCrawl video service error: ${response.status} - ${errorText}`);
    }
    const data: MetaCrawlVideoResponse = await response.json();
    if (!data.success) {
      throw new Error(data.error || 'Video generation failed');
    }
    return data;
  }
}
/** A single generated video returned by the crawl service. */
export interface MetaCrawlVideo {
  url: string; // URL of the generated video
  prompt: string; // prompt that produced this video
  model: string; // model identifier reported by the service
}
/** Envelope returned by /video/generate. */
export interface MetaCrawlVideoResponse {
  success: boolean;
  videos: MetaCrawlVideo[]; // empty when success is false
  error?: string; // populated when success is false
  conversation_id?: string;
}

View file

@ -85,11 +85,13 @@ interface AppState {
provider: 'whisk' | 'grok' | 'meta';
// Whisk (Google)
whiskCookies: string;
// Grok (xAI)
// Grok (xAI) - xLmiler/grok2api_python backend
grokApiUrl: string;
grokApiKey: string;
grokCookies: string;
// Meta AI
metaCookies: string;
facebookCookies: string;
};
setSettings: (s: Partial<AppState['settings']>) => void;
}
@ -154,7 +156,9 @@ export const useStore = create<AppState>()(
},
clearGallery: async () => {
await db.gallery.clear();
set({ gallery: [] });
// Also clear persistent videos and history if desired, or just gallery?
// Assuming "Clear All" in Gallery context implies clearing the visual workspace.
set({ gallery: [], videos: [] });
},
@ -176,15 +180,17 @@ export const useStore = create<AppState>()(
})),
settings: {
aspectRatio: '1:1',
aspectRatio: '9:16',
preciseMode: false,
imageCount: 4,
theme: 'dark',
provider: 'whisk',
whiskCookies: '',
grokApiUrl: 'http://localhost:3001',
grokApiKey: '',
grokCookies: '',
metaCookies: ''
metaCookies: '',
facebookCookies: ''
},
setSettings: (s) => set((state) => ({ settings: { ...state.settings, ...s } }))
}),
@ -192,9 +198,9 @@ export const useStore = create<AppState>()(
name: 'kv-pix-storage',
partialize: (state) => ({
settings: state.settings,
// gallery: state.gallery, // Don't persist gallery to localStorage
// gallery: state.gallery, // Don't persist gallery to localStorage (too large)
history: state.history,
videos: state.videos // Persist videos
// videos: state.videos // Don't persist videos to localStorage (too large)
}),
}
)

View file

@ -1,38 +0,0 @@
# Meta AI Python Service
# Uses metaai-api library from https://github.com/mir-ashiq/metaai-api
FROM python:3.11-slim

WORKDIR /app

# System deps: git for pip VCS installs, chromium for requests-html (pyppeteer),
# and curl for the HEALTHCHECK below (FIX: curl was used by HEALTHCHECK but
# never installed, so the container was always reported unhealthy).
RUN apt-get update && apt-get install -y \
    git \
    curl \
    chromium \
    chromium-driver \
    && rm -rf /var/lib/apt/lists/*

# Set Chrome path for pyppeteer
ENV CHROMIUM_EXECUTABLE=/usr/bin/chromium

# Copy requirements first for caching
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Install metaai-api from GitHub (for latest updates)
RUN pip install --no-cache-dir git+https://github.com/mir-ashiq/metaai-api.git

# Copy application code
COPY app/ ./app/

# Create non-root user
RUN useradd -m -u 1000 appuser && chown -R appuser:appuser /app
USER appuser

EXPOSE 8000

# Health check (relies on curl installed above)
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]

View file

@ -1,91 +0,0 @@
# Meta AI Image Generation Service
FastAPI wrapper for Meta AI image generation using [metaai-api](https://github.com/mir-ashiq/metaai-api).
## Features
- 🎨 **Image Generation** - Generate AI images via Meta AI
- 🔐 **Cookie Auth** - Uses Facebook/Meta cookies for authentication
- ⚡ **Rate Limiting** - Built-in rate limiting to prevent shadowban
- 🐳 **Docker Ready** - Easy deployment with Docker
## Quick Start
### Local Development
```bash
# Install dependencies
pip install -r requirements.txt
pip install git+https://github.com/mir-ashiq/metaai-api.git
# Run server
uvicorn app.main:app --reload --port 8000
```
### Docker
```bash
docker build -t meta-ai-service .
docker run -p 8000:8000 meta-ai-service
```
## API Endpoints
| Endpoint | Method | Description |
|----------|--------|-------------|
| `/health` | GET | Health check |
| `/generate/sync` | POST | Sync generation (waits for result) |
| `/generate` | POST | Async generation (returns task_id) |
| `/status/{task_id}` | GET | Get async task status |
| `/rate-limit` | GET | Get rate limit status |
## Usage Example
```python
import requests
response = requests.post("http://localhost:8000/generate/sync", json={
"prompt": "Imagine a beautiful sunset over mountains",
"cookies": [
{"name": "c_user", "value": "..."},
{"name": "xs", "value": "..."},
# ... other Facebook/Meta cookies
]
})
print(response.json())
# {"success": true, "images": [{"url": "...", "prompt": "...", "model": "imagine"}]}
```
## Required Cookies
You need Facebook/Meta cookies for authentication:
| Cookie | Description |
|--------|-------------|
| `c_user` | Facebook user ID |
| `xs` | Facebook session token |
| `sb` | Session browser identifier |
| `datr` | Device tracking |
| `abra_sess` | Meta AI session |
Export cookies from browser using Cookie-Editor extension.
## Environment Variables
| Variable | Default | Description |
|----------|---------|-------------|
| `META_RATE_LIMIT_DELAY` | 30 | Seconds between requests |
| `META_MAX_REQUESTS_HOUR` | 30 | Max requests per hour |
## Updating metaai-api
To get the latest version of the underlying library:
```bash
pip install -U git+https://github.com/mir-ashiq/metaai-api.git
```
## License
MIT

View file

@ -1 +0,0 @@
# Crawl4AI Meta AI Service

View file

@ -1,21 +0,0 @@
"""
Configuration settings for Meta AI service.
Uses environment variables with sensible defaults.
"""
import os
class Settings:
    """Runtime configuration, sourced from environment variables with defaults."""

    # Rate limiting: pause between requests and an hourly request cap.
    rate_limit_delay: float = float(os.getenv("META_RATE_LIMIT_DELAY", "30"))
    max_requests_per_hour: int = int(os.getenv("META_MAX_REQUESTS_HOUR", "30"))

    # Meta AI endpoints used by our wrapper.
    meta_ai_base: str = "https://www.meta.ai"
    graphql_endpoint: str = "https://www.meta.ai/api/graphql/"


# Shared singleton instance used across the service.
settings = Settings()

View file

@ -1,7 +0,0 @@
from .logger import Log
from .runtime import Run, Utils
from .headers import Headers
from .reverse.parser import Parser
from .reverse.xctid import Signature
from .reverse.anon import Anon
from .grok import Grok

View file

@ -1,328 +0,0 @@
from .logger import Log
from .runtime import Run, Utils
from .reverse.parser import Parser
from .reverse.xctid import Signature
from .reverse.anon import Anon
from .headers import Headers
from curl_cffi import requests, CurlMime
from dataclasses import dataclass, field
from bs4 import BeautifulSoup
from json import dumps, loads
from secrets import token_hex
from uuid import uuid4
@dataclass
class Models:
models: dict[str, list[str]] = field(default_factory=lambda: {
"grok-3-auto": ["MODEL_MODE_AUTO", "auto"],
"grok-3-fast": ["MODEL_MODE_FAST", "fast"],
"grok-4": ["MODEL_MODE_EXPERT", "expert"],
"grok-4-mini-thinking-tahoe": ["MODEL_MODE_GROK_4_MINI_THINKING", "grok-4-mini-thinking"]
})
def get_model_mode(self, model: str, index: int) -> str:
return self.models.get(model, ["MODEL_MODE_AUTO", "auto"])[index]
_Models = Models()
class Grok:
    """Reverse-engineered client for the grok.com web chat API.

    Mirrors the browser's request sequence: load the site, run the
    three-step anonymous-user handshake against ``/c`` (public key upload,
    challenge signing, token harvest), then post to the conversation REST
    endpoints with a computed ``x-statsig-id`` signature.

    NOTE(review): depends on scraped Next.js chunk names and server-action
    ids, so any grok.com redeploy can break it.
    """

    def __init__(self, model: str = "grok-3-auto", proxy: str = None) -> None:
        # curl_cffi session impersonating Chrome 136's TLS/HTTP2 fingerprint;
        # default headers disabled so we control exact header order.
        self.session: requests.session.Session = requests.Session(impersonate="chrome136", default_headers=False)
        self.headers: Headers = Headers()
        self.model_mode: str = _Models.get_model_mode(model, 0)
        self.model: str = model
        self.mode: str = _Models.get_model_mode(model, 1)
        # Handshake step counter (0..3); drives branching in c_request().
        self.c_run: int = 0
        # Fresh keypair for the anonymous-user challenge signature.
        self.keys: dict = Anon.generate_keys()
        if proxy:
            self.session.proxies = {
                "all": proxy
            }

    def _load(self, extra_data: dict = None) -> None:
        """Bootstrap session state.

        Without ``extra_data``: fetch https://grok.com/c and scrape the
        server-action ids, the statsig chunk name and the sentry tokens.
        With ``extra_data``: restore all of the above from a previous
        ``start_convo`` result, skipping the network round trip.
        """
        if not extra_data:
            self.session.headers = self.headers.LOAD
            load_site: requests.models.Response = self.session.get('https://grok.com/c')
            self.session.cookies.update(load_site.cookies)
            # Collect all Next.js chunk script URLs; parse_grok picks out the
            # one with server-action ids and the one feeding x-statsig-id.
            scripts: list = [s['src'] for s in BeautifulSoup(load_site.text, 'html.parser').find_all('script', src=True) if s['src'].startswith('/_next/static/chunks/')]
            self.actions, self.xsid_script = Parser.parse_grok(scripts)
            # Sentry meta tags double as request-tracing tokens sent back later.
            self.baggage: str = Utils.between(load_site.text, '<meta name="baggage" content="', '"')
            self.sentry_trace: str = Utils.between(load_site.text, '<meta name="sentry-trace" content="', '-')
        else:
            self.session.cookies.update(extra_data["cookies"])
            self.actions: list = extra_data["actions"]
            self.xsid_script: list = extra_data["xsid_script"]
            self.baggage: str = extra_data["baggage"]
            self.sentry_trace: str = extra_data["sentry_trace"]
        if not self.baggage:
            Log.Error("Failed to extract baggage token")
            # Persist the fetched HTML for offline debugging when available.
            if 'load_site' in locals():
                with open("debug_grok_response.html", "w", encoding="utf-8") as f:
                    f.write(load_site.text)
                with open("error.log", "a") as f:
                    f.write(f"FAILED TO EXTRACT BAGGAGE. HTML saved to debug_grok_response.html\n")
            else:
                with open("error.log", "a") as f:
                    f.write(f"FAILED TO EXTRACT BAGGAGE (No load_site object).\n")
            # Don't crash here, subsequent requests will fail but log will be preserved

    def c_request(self, next_action: str) -> None:
        """Execute one handshake POST to ``/c``.

        ``next_action`` is a Next.js server-action id scraped by _load().
        Behavior depends on ``self.c_run``:
        step 0 uploads our public key, step 1 extracts and signs the server
        challenge, step 2 harvests the verification token and SVG inputs
        needed to compute ``x-statsig-id``. Each call advances ``c_run``.
        """
        # Safety check for missing tokens
        if not self.baggage:
            return
        self.session.headers = self.headers.C_REQUEST
        self.session.headers.update({
            'baggage': self.baggage,
            'next-action': next_action,
            'sentry-trace': f'{self.sentry_trace}-{str(uuid4()).replace("-", "")[:16]}-0',
        })
        self.session.headers = Headers.fix_order(self.session.headers, self.headers.C_REQUEST)
        if self.c_run == 0:
            # Step 0: multipart server-action call carrying userPublicKey.
            self.session.headers.pop("content-type")
            mime = CurlMime()
            mime.addpart(name="1", data=bytes(self.keys["userPublicKey"]), filename="blob", content_type="application/octet-stream")
            mime.addpart(name="0", filename=None, data='[{"userPublicKey":"$o1"}]')
            c_request: requests.models.Response = self.session.post("https://grok.com/c", multipart=mime)
            self.session.cookies.update(c_request.cookies)
            self.anon_user: str = Utils.between(c_request.text, '{"anonUserId":"', '"')
            self.c_run += 1
        else:
            if self.c_run == 1:
                data: str = dumps([{"anonUserId":self.anon_user}])
            elif self.c_run == 2:
                data: str = dumps([{"anonUserId":self.anon_user,**self.challenge_dict}])
            c_request: requests.models.Response = self.session.post('https://grok.com/c', data=data)
            self.session.cookies.update(c_request.cookies)
            if self.c_run == 1:
                # Step 1: challenge bytes are framed between the hex markers
                # 3a6f38362c (":o86,") and 313a ("1:") in the raw response.
                start_idx = c_request.content.hex().find("3a6f38362c")
                if start_idx != -1:
                    start_idx += len("3a6f38362c")
                    end_idx = c_request.content.hex().find("313a", start_idx)
                    if end_idx != -1:
                        challenge_hex = c_request.content.hex()[start_idx:end_idx]
                        challenge_bytes = bytes.fromhex(challenge_hex)
                        self.challenge_dict: dict = Anon.sign_challenge(challenge_bytes, self.keys["privateKey"])
                        Log.Success(f"Solved Challenge: {self.challenge_dict}")
            elif self.c_run == 2:
                # Step 2: page now exposes the site-verification token plus the
                # SVG animation values consumed by Signature.generate_sign().
                self.verification_token, self.anim = Parser.get_anim(c_request.text, "grok-site-verification")
                self.svg_data, self.numbers = Parser.parse_values(c_request.text, self.anim, self.xsid_script)
            self.c_run += 1

    def start_convo(self, message: str, extra_data: dict = None) -> dict:
        """Send ``message`` to Grok and return the parsed reply.

        Without ``extra_data`` a brand-new conversation is created (full
        handshake); with it, a follow-up message is posted to the existing
        conversation using the cached session state. On success returns a
        dict with ``response``/``stream_response``/``images`` plus an
        ``extra_data`` blob to pass into the next call; on failure returns
        ``{"error": ...}`` (or retries once via a fresh client if grok's
        anti-bot rejection is detected).
        """
        if not extra_data:
            self._load()
            if not self.actions or len(self.actions) < 3:
                Log.Error(f"Failed to load actions: {self.actions}")
                return {"error": "Failed to initialize Grok connection (missing actions)."}
            self.c_request(self.actions[0])
            self.c_request(self.actions[1])
            self.c_request(self.actions[2])
            xsid: str = Signature.generate_sign('/rest/app-chat/conversations/new', 'POST', self.verification_token, self.svg_data, self.numbers)
        else:
            self._load(extra_data)
            # Resume from step 1: the public key was uploaded last session.
            self.c_run: int = 1
            self.anon_user: str = extra_data["anon_user"]
            self.keys["privateKey"] = extra_data["privateKey"]
            self.c_request(self.actions[1])
            self.c_request(self.actions[2])
            xsid: str = Signature.generate_sign(f'/rest/app-chat/conversations/{extra_data["conversationId"]}/responses', 'POST', self.verification_token, self.svg_data, self.numbers)
        self.session.headers = self.headers.CONVERSATION
        self.session.headers.update({
            'baggage': self.baggage,
            'sentry-trace': f'{self.sentry_trace}-{str(uuid4()).replace("-", "")[:16]}-0',
            'x-statsig-id': xsid,
            'x-xai-request-id': str(uuid4()),
            'traceparent': f"00-{token_hex(16)}-{token_hex(8)}-00"
        })
        self.session.headers = Headers.fix_order(self.session.headers, self.headers.CONVERSATION)
        if not extra_data:
            # Payload for a brand-new conversation.
            conversation_data: dict = {
                'temporary': False,
                'modelName': self.model,
                'message': message,
                'fileAttachments': [],
                'imageAttachments': [],
                'disableSearch': False,
                'enableImageGeneration': True,
                'returnImageBytes': False,
                'returnRawGrokInXaiRequest': False,
                'enableImageStreaming': True,
                'imageGenerationCount': 2,
                'forceConcise': False,
                'toolOverrides': {},
                'enableSideBySide': True,
                'sendFinalMetadata': True,
                'isReasoning': False,
                'webpageUrls': [],
                'disableTextFollowUps': False,
                'responseMetadata': {
                    'requestModelDetails': {
                        'modelId': self.model,
                    },
                },
                'disableMemory': False,
                'forceSideBySide': False,
                'modelMode': self.model_mode,
                'isAsyncChat': False,
            }
            convo_request: requests.models.Response = self.session.post('https://grok.com/rest/app-chat/conversations/new', json=conversation_data, timeout=9999)
            if "modelResponse" in convo_request.text:
                # Response body is newline-delimited JSON events; fold them
                # into one reply. New-conversation events nest the payload
                # under result.response / result.conversation.
                response = conversation_id = parent_response = image_urls = None
                stream_response: list = []
                for response_dict in convo_request.text.strip().split('\n'):
                    data: dict = loads(response_dict)
                    token: str = data.get('result', {}).get('response', {}).get('token')
                    if token:
                        stream_response.append(token)
                    if not response and data.get('result', {}).get('response', {}).get('modelResponse', {}).get('message'):
                        response: str = data['result']['response']['modelResponse']['message']
                    if not conversation_id and data.get('result', {}).get('conversation', {}).get('conversationId'):
                        conversation_id: str = data['result']['conversation']['conversationId']
                    if not parent_response and data.get('result', {}).get('response', {}).get('modelResponse', {}).get('responseId'):
                        parent_response: str = data['result']['response']['modelResponse']['responseId']
                    if not image_urls and data.get('result', {}).get('response', {}).get('modelResponse', {}).get('generatedImageUrls', {}):
                        image_urls: str = data['result']['response']['modelResponse']['generatedImageUrls']
                return {
                    "response": response,
                    "stream_response": stream_response,
                    "images": image_urls,
                    "extra_data": {
                        "anon_user": self.anon_user,
                        "cookies": self.session.cookies.get_dict(),
                        "actions": self.actions,
                        "xsid_script": self.xsid_script,
                        "baggage": self.baggage,
                        "sentry_trace": self.sentry_trace,
                        "conversationId": conversation_id,
                        "parentResponseId": parent_response,
                        "privateKey": self.keys["privateKey"]
                    }
                }
            else:
                if 'rejected by anti-bot rules' in convo_request.text:
                    # NOTE(review): the proxy is passed as the *model* positional
                    # argument here — probably meant Grok(proxy=...). Confirm.
                    return Grok(self.session.proxies.get("all")).start_convo(message=message, extra_data=extra_data)
                Log.Error("Something went wrong")
                Log.Error(convo_request.text)
                return {"error": convo_request.text}
        else:
            # Payload for a follow-up message in an existing conversation.
            conversation_data: dict = {
                'message': message,
                'modelName': self.model,
                'parentResponseId': extra_data["parentResponseId"],
                'disableSearch': False,
                'enableImageGeneration': True,
                'imageAttachments': [],
                'returnImageBytes': False,
                'returnRawGrokInXaiRequest': False,
                'fileAttachments': [],
                'enableImageStreaming': True,
                'imageGenerationCount': 2,
                'forceConcise': False,
                'toolOverrides': {},
                'enableSideBySide': True,
                'sendFinalMetadata': True,
                'customPersonality': '',
                'isReasoning': False,
                'webpageUrls': [],
                'metadata': {
                    'requestModelDetails': {
                        'modelId': self.model,
                    },
                    'request_metadata': {
                        'model': self.model,
                        'mode': self.mode,
                    },
                },
                'disableTextFollowUps': False,
                'disableArtifact': False,
                'isFromGrokFiles': False,
                'disableMemory': False,
                'forceSideBySide': False,
                'modelMode': self.model_mode,
                'isAsyncChat': False,
                'skipCancelCurrentInflightRequests': False,
                'isRegenRequest': False,
            }
            convo_request: requests.models.Response = self.session.post(f'https://grok.com/rest/app-chat/conversations/{extra_data["conversationId"]}/responses', json=conversation_data, timeout=9999)
            if "modelResponse" in convo_request.text:
                # Follow-up events are flatter: payload sits directly under
                # result (no intermediate "response"/"conversation" keys).
                response = conversation_id = parent_response = image_urls = None
                stream_response: list = []
                for response_dict in convo_request.text.strip().split('\n'):
                    data: dict = loads(response_dict)
                    token: str = data.get('result', {}).get('token')
                    if token:
                        stream_response.append(token)
                    if not response and data.get('result', {}).get('modelResponse', {}).get('message'):
                        response: str = data['result']['modelResponse']['message']
                    if not parent_response and data.get('result', {}).get('modelResponse', {}).get('responseId'):
                        parent_response: str = data['result']['modelResponse']['responseId']
                    if not image_urls and data.get('result', {}).get('modelResponse', {}).get('generatedImageUrls', {}):
                        image_urls: str = data['result']['modelResponse']['generatedImageUrls']
                return {
                    "response": response,
                    "stream_response": stream_response,
                    "images": image_urls,
                    "extra_data": {
                        "anon_user": self.anon_user,
                        "cookies": self.session.cookies.get_dict(),
                        "actions": self.actions,
                        "xsid_script": self.xsid_script,
                        "baggage": self.baggage,
                        "sentry_trace": self.sentry_trace,
                        "conversationId": extra_data["conversationId"],
                        "parentResponseId": parent_response,
                        "privateKey": self.keys["privateKey"]
                    }
                }
            else:
                if 'rejected by anti-bot rules' in convo_request.text:
                    # NOTE(review): same model/proxy argument mix-up as above.
                    return Grok(self.session.proxies.get("all")).start_convo(message=message, extra_data=extra_data)
                Log.Error("Something went wrong")
                Log.Error(convo_request.text)
                return {"error": convo_request.text}

View file

@ -1,78 +0,0 @@
class Headers:
    """Browser-accurate header templates for the three request phases.

    Header *order* matters to grok.com's fingerprinting, which is why the
    session is created with default_headers disabled and fix_order() is
    applied after every update.
    """

    @staticmethod
    def fix_order(headers, base) -> dict:
        """Reorder ``headers`` so keys appearing in ``base`` come first, in
        base's order, followed by any remaining keys in their original order."""
        ordered = {key: headers[key] for key in base if key in headers}
        for key, value in headers.items():
            ordered.setdefault(key, value)
        return ordered

    def __init__(self) -> None:
        # Initial page load (navigation request).
        self.LOAD: dict = {
            "upgrade-insecure-requests": "1",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36",
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "sec-ch-ua": "\"Google Chrome\";v=\"143\", \"Chromium\";v=\"143\", \"Not A(Brand\";v=\"24\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-site": "none",
            "sec-fetch-mode": "navigate",
            "sec-fetch-user": "?1",
            "sec-fetch-dest": "document",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7",
            "priority": "u=0, i",
        }
        # Next.js server-action POSTs to /c (handshake phase).
        self.C_REQUEST: dict = {
            "sec-ch-ua-platform": "\"Windows\"",
            "next-action": "",
            "sec-ch-ua": "\"Google Chrome\";v=\"143\", \"Chromium\";v=\"143\", \"Not A(Brand\";v=\"24\"",
            "sec-ch-ua-mobile": "?0",
            "next-router-state-tree": "%5B%22%22%2C%7B%22children%22%3A%5B%22c%22%2C%7B%22children%22%3A%5B%5B%22slug%22%2C%22%22%2C%22oc%22%5D%2C%7B%22children%22%3A%5B%22__PAGE__%22%2C%7B%7D%2Cnull%2Cnull%5D%7D%2Cnull%2Cnull%5D%7D%2Cnull%2Cnull%5D%7D%2Cnull%2Cnull%2Ctrue%5D",
            "baggage": '',
            "sentry-trace": "",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36",
            "accept": "text/x-component",
            "content-type": "text/plain;charset=UTF-8",
            "origin": "https://grok.com",
            "sec-fetch-site": "same-origin",
            "sec-fetch-mode": "cors",
            "sec-fetch-dest": "empty",
            "referer": "https://grok.com/c",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7",
            "priority": "u=1, i",
        }
        # JSON POSTs to the conversation REST endpoints.
        self.CONVERSATION: dict = {
            "x-xai-request-id": "",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-ch-ua": "\"Google Chrome\";v=\"143\", \"Chromium\";v=\"143\", \"Not A(Brand\";v=\"24\"",
            "sec-ch-ua-mobile": "?0",
            "baggage": "",
            "sentry-trace": "",
            "traceparent": "",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36",
            "content-type": "application/json",
            "x-statsig-id": "",
            "accept": "*/*",
            "origin": "https://grok.com",
            "sec-fetch-site": "same-origin",
            "sec-fetch-mode": "cors",
            "sec-fetch-dest": "empty",
            "referer": "https://grok.com/",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7",
            "priority": "u=1, i",
        }

View file

@ -1,60 +0,0 @@
from typing import Optional
from datetime import datetime
from colorama import Fore
from threading import Lock
from time import time
class Log:
    """
    Thread-safe console logger that prefixes each message with a coloured
    [HH:MM:SS] timestamp.
    """
    # Colour code per level (colorama Fore escapes). Fixed annotation:
    # this is a plain dict, not Optional[dict].
    colours: dict = {
        'SUCCESS': Fore.LIGHTGREEN_EX,
        'ERROR': Fore.LIGHTRED_EX,
        'INFO': Fore.LIGHTWHITE_EX
    }
    # Serialises print() so concurrent threads don't interleave lines.
    lock = Lock()

    @staticmethod
    def _log(level, prefix, message) -> None:
        """
        Build and print one log line.

        :param level: severity label (currently unused, kept for callers)
        :param prefix: coloured marker such as "[+]" or "[!]"
        :param message: message to log
        """
        # strftime returns a string (was mis-annotated Optional[int]).
        timestamp: str = datetime.fromtimestamp(time()).strftime("%H:%M:%S")
        log_message = (
            f"{Fore.LIGHTBLACK_EX}[{Fore.MAGENTA}{timestamp}{Fore.RESET}{Fore.LIGHTBLACK_EX}]{Fore.RESET} "
            f"{prefix} {message}"
        )
        with Log.lock:
            print(log_message)

    @staticmethod
    def Success(message, prefix="[+]", color=colours['SUCCESS']) -> None:
        """
        Log a success message.
        """
        Log._log("SUCCESS", f"{color}{prefix}{Fore.RESET}", message)

    @staticmethod
    def Error(message, prefix="[!]", color=colours['ERROR']) -> None:
        """
        Log an error message.
        """
        Log._log("ERROR", f"{color}{prefix}{Fore.RESET}", message)

    @staticmethod
    def Info(message, prefix="[!]", color=colours['INFO']) -> None:
        """
        Log an info message.
        """
        Log._log("INFO", f"{color}{prefix}{Fore.RESET}", message)

View file

@ -1,65 +0,0 @@
[
{
"xsid_script": "static/chunks/444a4d2e0656ce52.js",
"action_script": "/_next/static/chunks/07efa55314110fbd.js",
"actions": [
"7f7a9e476198643fb30f17ab0e0c41f8f2edc18ae7",
"7f0a06a29ceb599ed2d3901e16b2a1e088d2372deb",
"7f38fb97af610ff9d28ae27294dc41bd9eca880852"
]
},
{
"xsid_script": "static/chunks/9e496d2be7115b4d.js",
"action_script": "/_next/static/chunks/fcbe5d6b4ae286fe.js",
"actions": [
"7fd00a18c007ec926f1136cb558f9ef9f903dcc1f4",
"7f795a3c3829bb45c6e2d2ad0587c7e039f513a509",
"7fa94a2c9b7ebcf8874e824d3365d9b9735a7afe34"
]
},
{
"xsid_script": "static/chunks/069cbd766e2e100e.js",
"action_script": "/_next/static/chunks/cb52eeab0fd0e58c.js",
"actions": [
"7fffbbcd70e50341926589c4f0ed7ab475afad3321",
"7fdf5ae16dee580d89683963be28bc62f1603ffea1",
"7f37fea17b375870e80133012d199e6cdee6201091"
]
},
{
"xsid_script": "static/chunks/c1c11f0dd2cadabf.js",
"action_script": "/_next/static/chunks/bdf3abb63890a18e.js",
"actions": [
"7f71f42b11fe0a773c18539575170eb3cda2720fff",
"7f8159187cdb2e21e48a06256220a8bbf7b1088b34",
"7fb14bed5522696e9d5cbec5fd92ea7cebee752db0"
]
},
{
"xsid_script": "static/chunks/720ab0732a942089.js",
"action_script": "/_next/static/chunks/dcf3a6315f86c917.js",
"actions": [
"7f8b78848a6f7726b96bec61b199a7bdc02e392621",
"7f1e31eb362d2be64d0ab258d72fc770ecbb261237",
"7f0c6140a77d46f5696f9b5d4fec00e3165e9bf678"
]
},
{
"xsid_script": "static/chunks/68f6ef173efbeb67.js",
"action_script": "/_next/static/chunks/4114b4b6e0483e8c.js",
"actions": [
"7f3749b0c81bd826ca8cc02ccf8009a911410e49f7",
"7f5e48bfe2a1588dc86c1fe1bf3eac0e2676f55532",
"7f5341512f3793d10791b2ca628b300aac6ba34b98"
]
},
{
"xsid_script": "static/chunks/87d576c60e76a1e9.js",
"action_script": "/_next/static/chunks/843010bb02f13cde.js",
"actions": [
"7fb4349e44719d28ba8da9344e11ab7e5e3b1c474f",
"7f9a9b0c62c7c8775525be38003aa09725ea709115",
"7f82eca570c9532c4193e3784a3a017ef7229a3edf"
]
}
]

View file

@ -1 +0,0 @@
{"https://grok.com/_next/static/chunks/29589.8ec1f2947a0e205d.js": [6, 14, 12, 16], "https://grok.com/_next/static/chunks/e628011fd4d67558.js": [0, 2, 8, 9], "https://grok.com/_next/static/chunks/77ffaef786c38d59.js": [13, 33, 11, 36], "https://grok.com/_next/static/chunks/444a4d2e0656ce52.js": [14, 10, 25, 24], "https://grok.com/_next/static/chunks/9e496d2be7115b4d.js": [11, 24, 38, 38], "https://grok.com/_next/static/chunks/069cbd766e2e100e.js": [0, 37, 0, 45], "https://grok.com/_next/static/chunks/c1c11f0dd2cadabf.js": [25, 10, 30, 26], "https://grok.com/_next/static/chunks/720ab0732a942089.js": [41, 6, 33, 12], "https://grok.com/_next/static/chunks/68f6ef173efbeb67.js": [31, 26, 18, 35], "https://grok.com/_next/static/chunks/87d576c60e76a1e9.js": [18, 23, 44, 33]}

View file

@ -1,43 +0,0 @@
from base64 import b64encode, b64decode
from secrets import token_bytes
from coincurve import PrivateKey
from hashlib import sha256
class Anon:
    """Key material and challenge signing for grok.com anonymous users."""

    @staticmethod
    def publicKeyCreate(e) -> list:
        """Derive the compressed secp256k1 public key for seed bytes ``e``,
        returned as a list of ints."""
        secret = PrivateKey(bytes(e))
        compressed = secret.public_key.format(compressed=True)
        return list(compressed)

    @staticmethod
    def xor(e) -> str:
        """Base64-encode the raw byte values of ``e`` via a latin-1 round
        trip (name is historical — no xor is performed)."""
        raw = ""
        for value in e:
            raw += chr(value)
        return b64encode(raw.encode('latin-1')).decode()

    @staticmethod
    def generate_keys() -> dict:
        """Create a fresh keypair: base64 private seed plus public key bytes."""
        seed = token_bytes(32)
        public_key = Anon.publicKeyCreate(seed)
        encoded_seed = Anon.xor(seed)
        return {
            "privateKey": encoded_seed,
            "userPublicKey": public_key
        }

    @staticmethod
    def sign_challenge(challenge_data: bytes, key: str) -> dict:
        """Sign the SHA-256 of the server challenge with the stored base64
        private key; returns both parts base64-encoded."""
        secret: PrivateKey = PrivateKey(b64decode(key))
        digest: bytes = sha256(challenge_data).digest()
        signature: bytes = secret.sign_recoverable(digest, hasher=None)[:64]
        return {
            "challenge": b64encode(challenge_data).decode(),
            "signature": b64encode(signature).decode()
        }

View file

@ -1,139 +0,0 @@
from re import findall, search
from json import load, dump
from base64 import b64decode
from typing import Optional
from curl_cffi import requests
from ..runtime import Utils
from os import path
class Parser:
    """Scrapes and caches the dynamic tokens grok.com's statsig scheme needs:
    per-script byte-index lists (txid.json) and per-deploy server-action /
    chunk fingerprints (grok.json)."""
    # script URL -> list of byte indices used by Signature.xs; disk-backed.
    mapping: dict = {}
    _mapping_loaded: bool = False
    # Known deploy fingerprints: {"xsid_script", "action_script", "actions"}.
    grok_mapping: list = []
    _grok_mapping_loaded: bool = False

    @classmethod
    def _load__xsid_mapping(cls):
        # Lazy one-time load of the txid cache; silently skipped if absent.
        if not cls._mapping_loaded and path.exists('core/mappings/txid.json'):
            with open('core/mappings/txid.json', 'r') as f:
                cls.mapping = load(f)
            cls._mapping_loaded = True

    @classmethod
    def _load_grok_mapping(cls):
        # Lazy one-time load of the deploy-fingerprint cache.
        if not cls._grok_mapping_loaded and path.exists('core/mappings/grok.json'):
            with open('core/mappings/grok.json', 'r') as f:
                cls.grok_mapping = load(f)
            cls._grok_mapping_loaded = True

    @staticmethod
    def parse_values(html: str, loading: str = "loading-x-anim-0", scriptId: str = "") -> tuple[str, Optional[str]]:
        """Extract the SVG path for the selected loading animation and, when
        ``scriptId`` is given, the byte-index list from that script.

        NOTE(review): return arity is inconsistent — with scriptId it returns
        (svg_data, numbers:list); without, a bare svg_data string; the error
        path returns a 2-tuple either way. The declared return type does not
        match. Callers here always pass scriptId, so the tuple path is the
        one exercised — confirm before relying on the bare-string path.
        """
        Parser._load__xsid_mapping()
        # Long "d" attributes (>=200 chars) are the loading-animation paths.
        all_d_values = findall(r'"d":"(M[^"]{200,})"', html)
        if not all_d_values:
            # Fallback or error
            print("Warning: No SVG paths found")
            return "", None if scriptId else ""
        try:
            # loading is e.g. "loading-x-anim-2"; pick that animation's path.
            anim_index = int(loading.split("loading-x-anim-")[1])
            if anim_index >= len(all_d_values):
                anim_index = 0
            svg_data = all_d_values[anim_index]
        except (IndexError, ValueError):
            svg_data = all_d_values[0]
        if scriptId:
            if scriptId == "ondemand.s":
                # Twitter-hosted variant of the statsig script.
                script_link: str = 'https://abs.twimg.com/responsive-web/client-web/ondemand.s.' + Utils.between(html, f'"{scriptId}":"', '"') + 'a.js'
            else:
                script_link: str = f'https://grok.com/_next/{scriptId}'
            if script_link in Parser.mapping:
                numbers: list = Parser.mapping[script_link]
            else:
                # Fetch the script and pull the x[<n>], 16 index constants.
                script_content: str = requests.get(script_link, impersonate="chrome136").text
                matches = findall(r'x\[(\d+)\]\s*,\s*16', script_content)
                if matches:
                    numbers: list = [int(x) for x in matches]
                else:
                    numbers = []
                # Persist the freshly learned indices for future runs.
                Parser.mapping[script_link] = numbers
                if path.exists('core/mappings'):
                    try:
                        with open('core/mappings/txid.json', 'w') as f:
                            dump(Parser.mapping, f)
                    except Exception as e:
                        print(f"Failed to save mapping: {e}")
            return svg_data, numbers
        else:
            return svg_data

    @staticmethod
    def get_anim(html: str, verification: str = "grok-site-verification") -> tuple[str, str]:
        """Read the site-verification meta token and derive which of the four
        loading animations (byte 5 of the decoded token, mod 4) applies."""
        verification_token: str = Utils.between(html, f'"name":"{verification}","content":"', '"')
        try:
            array: list = list(b64decode(verification_token))
            if len(array) > 5:
                anim: str = "loading-x-anim-" + str(array[5] % 4)
            else:
                anim = "loading-x-anim-0"
        except Exception:
            anim = "loading-x-anim-0"
        return verification_token, anim

    @staticmethod
    def parse_grok(scripts: list) -> tuple[list, str]:
        """Find the server-action ids and the statsig chunk name for the
        current deploy, consulting/extending the grok.json cache.

        NOTE(review): if the xsid regex finds no match, .group(1) raises
        AttributeError; and the final else falls through returning None
        (not the declared tuple) — callers unpack the result, so both
        paths would crash. Confirm whether that is acceptable.
        """
        Parser._load_grok_mapping()
        # Fast path: deploy already fingerprinted.
        for index in Parser.grok_mapping:
            if index.get("action_script") in scripts:
                return index["actions"], index["xsid_script"]
        script_content1: Optional[str] = None
        script_content2: Optional[str] = None
        action_script: Optional[str] = None
        # Slow path: download each chunk and identify the two we need.
        for script in scripts:
            content: str = requests.get(f'https://grok.com{script}', impersonate="chrome136").text
            if "anonPrivateKey" in content:
                script_content1 = content
                action_script = script
            elif "880932)" in content:
                script_content2 = content
        if not script_content1 or not script_content2:
            print("Failed to find required scripts")
            return [], ""
        actions: list = findall(r'createServerReference\)\("([a-f0-9]+)"', script_content1)
        xsid_script: str = search(r'"(static/chunks/[^"]+\.js)"[^}]*?\(880932\)', script_content2).group(1)
        if actions and xsid_script:
            # Cache the fingerprint so future runs skip the downloads.
            Parser.grok_mapping.append({
                "xsid_script": xsid_script,
                "action_script": action_script,
                "actions": actions
            })
            with open('core/mappings/grok.json', 'w') as f:
                dump(Parser.grok_mapping, f, indent=2)
            return actions, xsid_script
        else:
            print("Something went wrong while parsing script and actions")

View file

@ -1,180 +0,0 @@
from math import floor, copysign, pi, cos, sin
from base64 import b64decode, b64encode
from re import findall, sub
from typing import List, Dict
from random import random
from hashlib import sha256
from struct import pack
from time import time
class Signature:
@staticmethod
def _h(x: float, _param: float, c: float, e: bool):
f = ((x * (c - _param)) / 255.0) + _param
if e:
return floor(f)
rounded = round(float(f), 2)
if rounded == 0.0:
return 0.0
return rounded
@staticmethod
def cubicBezierEased(t: float, x1: float, y1: float, x2: float, y2: float) -> float:
def bezier(u: float):
omu = 1.0 - u
b1 = 3.0 * omu * omu * u
b2 = 3.0 * omu * u * u
b3 = u * u * u
x = b1 * x1 + b2 * x2 + b3
y = b1 * y1 + b2 * y2 + b3
return x, y
lo, hi = 0.0, 1.0
for _ in range(80):
mid = 0.5 * (lo + hi)
if bezier(mid)[0] < t:
lo = mid
else:
hi = mid
u = 0.5 * (lo + hi)
return bezier(u)[1]
@staticmethod
def xa(svg: str) -> List[List[int]]:
s = (svg)
substr = s[9:]
parts = substr.split("C")
out = []
for part in parts:
cleaned = sub(r"[^\d]+", " ", part).strip()
if cleaned == "":
nums = [0]
else:
nums = [int(tok) for tok in cleaned.split() if tok != ""]
out.append(nums)
return out
@staticmethod
def tohex(num: float) -> str:
rounded = round(float(num), 2)
if rounded == 0.0:
return "0"
sign = "-" if copysign(1.0, rounded) < 0 else ""
absval = abs(rounded)
intpart = int(floor(absval))
frac = absval - intpart
if frac == 0.0:
return sign + format(intpart, "x")
frac_digits = []
f = frac
for _ in range(20):
f *= 16
digit = int(floor(f + 1e-12))
frac_digits.append(format(digit, "x"))
f -= digit
if abs(f) < 1e-12:
break
frac_str = "".join(frac_digits).rstrip("0")
if frac_str == "":
return sign + format(intpart, "x")
return sign + format(intpart, "x") + "." + frac_str
@staticmethod
def simulateStyle(values: List[int], c: int) -> Dict[str,str]:
duration = 4096
currentTime = round(c / 10.0) * 10
t = currentTime / duration
cp = [Signature._h(v, -1 if (i % 2) else 0, 1, False) for i, v in enumerate(values[7:])]
easedY = Signature.cubicBezierEased(t, cp[0], cp[1], cp[2], cp[3])
start = [float(x) for x in values[0:3]]
end = [float(x) for x in values[3:6]]
r = round(start[0] + (end[0] - start[0]) * easedY)
g = round(start[1] + (end[1] - start[1]) * easedY)
b = round(start[2] + (end[2] - start[2]) * easedY)
color = f"rgb({r}, {g}, {b})"
endAngle = Signature._h(values[6], 60, 360, True)
angle = endAngle * easedY
rad = angle * pi / 180.0
def is_effectively_zero(val: float) -> bool:
return abs(val) < 1e-7
def is_effectively_integer(val: float) -> bool:
return abs(val - round(val)) < 1e-7
cosv = cos(rad)
sinv = sin(rad)
if is_effectively_zero(cosv):
a = 0
d = 0
else:
if is_effectively_integer(cosv):
a = int(round(cosv))
d = int(round(cosv))
else:
a = f"{cosv:.6f}"
d = f"{cosv:.6f}"
if is_effectively_zero(sinv):
bval = 0
cval = 0
else:
if is_effectively_integer(sinv):
bval = int(round(sinv))
cval = int(round(-sinv))
else:
bval = f"{sinv:.7f}"
cval = f"{(-sinv):.7f}"
transform = f"matrix({a}, {bval}, {cval}, {d}, 0, 0)"
return {"color": color, "transform": transform}
@staticmethod
def xs(x_bytes: bytes, svg: str, x_values: list) -> str:
arr = list(x_bytes)
idx = arr[x_values[0]] % 16
c = ((arr[x_values[1]] % 16) * (arr[x_values[2]] % 16)) * (arr[x_values[3]] % 16)
o = Signature.xa(svg)
vals = o[idx]
k = Signature.simulateStyle(vals, c)
concat = str(k["color"]) + str(k["transform"])
matches = findall(r"[\d\.\-]+", concat)
converted = []
for m in matches:
num = float(m)
hexstr = Signature.tohex(num)
converted.append(hexstr)
joined = "".join(converted)
cleaned = joined.replace(".", "").replace("-", "")
return cleaned
@staticmethod
def generate_sign(path: str, method: str, verification: str, svg: str, x_values: list, time_n: int = None, random_float: float = None) -> str:
n = int(time() - 1682924400) if not time_n else time_n
t = pack('<I', n)
r = b64decode(verification)
o = Signature.xs(r, svg, x_values)
msg = "!".join([method, path, str(n)]) + "obfiowerehiring" + o
digest = sha256(msg.encode('utf-8')).digest()[:16]
prefix_byte = int(floor(random() if not random_float else random_float * 256))
assembled = bytes([prefix_byte]) + r + t + digest + bytes([3])
arr = bytearray(assembled)
if len(arr) > 0:
first = arr[0]
for i in range(1, len(arr)):
arr[i] = arr[i] ^ first
return b64encode(bytes(arr)).decode('ascii').replace('=', '')

View file

@ -1,49 +0,0 @@
from typing import Callable, Any, Optional, Type
from functools import wraps
from .logger import Log
class Run:
    """
    Runtime helpers: decorator-based error trapping.
    """

    @staticmethod
    def Error(func: Callable[..., Any]) -> Callable[..., Any]:
        """
        Decorator routing any exception from ``func`` to handle_error.

        @param func: The function to wrap.
        @return: Wrapped callable; returns None when an error was handled.
        """
        @wraps(func)
        def guarded(*args: Any, **kwargs: Any) -> Any:
            try:
                return func(*args, **kwargs)
            except Exception as exc:
                Run.handle_error(exc)
                return None
        return guarded

    @staticmethod
    def handle_error(exception: Exception) -> Optional[None]:
        """
        Report the exception, then terminate the process.

        @param exception: Exception that occurred
        """
        Log.Error(f"Error occurred: {exception}")
        exit()
class Utils:
    def between(
        main_text: Optional[str],
        value_1: Optional[str],
        value_2: Optional[str],
    ) -> str:
        """
        Return the text between the first occurrence of ``value_1`` and the
        next ``value_2`` in ``main_text``; "" when ``value_1`` is absent or
        ``main_text`` is None.
        """
        try:
            after_first = main_text.split(value_1)[1]
            return after_first.split(value_2)[0]
        except (IndexError, AttributeError):
            return ""

View file

@ -1,111 +0,0 @@
import asyncio
import logging
from typing import Dict, Optional
from playwright.async_api import async_playwright, Browser, Page
from playwright_stealth import Stealth
logger = logging.getLogger(__name__)
async def get_grok_cookies() -> Dict[str, str]:
    """
    Launches a HEADFUL browser with Stealth settings to bypass Cloudflare.
    Waits generously for manual user interaction if needed.

    Returns the cookie jar as a name->value dict (ideally containing
    cf_clearance); returns {} on any unrecoverable failure. Progress is
    appended to error.log for post-mortem debugging.
    """
    browser: Optional[Browser] = None
    try:
        logger.info("Launching Playwright (Stealth Mode) for authentication...")
        with open("error.log", "a") as f:
            f.write("Browser: Launching Playwright (Visible, Stealth v2)...\n")
        async with async_playwright() as p:
            # Launch chromium in HEADFUL mode
            browser = await p.chromium.launch(
                headless=False,
                args=[
                    '--no-sandbox',
                    '--disable-setuid-sandbox',
                    '--disable-blink-features=AutomationControlled',
                    '--start-maximized'
                ]
            )
            # Use a slightly more random user agent
            context = await browser.new_context(
                viewport=None,  # Allow window to determine size
                locale='en-US',
                timezone_id='America/New_York'
            )
            page: Page = await context.new_page()
            # Apply stealth using new Class-based API
            # Try to initialize Stealth and apply async
            stealth = Stealth()
            await stealth.apply_stealth_async(page)
            logger.info("Navigating to https://grok.com...")
            with open("error.log", "a") as f:
                f.write("Browser: Navigating to grok.com...\n")
            # Go to page
            try:
                await page.goto('https://grok.com', timeout=60000, wait_until='domcontentloaded')
            except Exception as e:
                # Navigation timeouts are tolerated — the challenge page may
                # still be interactable below.
                with open("error.log", "a") as f:
                    f.write(f"Browser: Navigation warning (might be loading): {e}\n")
            with open("error.log", "a") as f:
                f.write("Browser: Waiting 120s for challenge (Please solve manually if visible)...\n")
            # Polling wait for 120s
            # We explicitly check for success selector: textarea or specific home element
            authenticated = False
            for i in range(24):  # 24 * 5s = 120s
                try:
                    # Check for Success
                    if await page.query_selector('textarea[placeholder*="Grok"]'):
                        with open("error.log", "a") as f:
                            f.write("Browser: Success! Grok UI detected.\n")
                        authenticated = True
                        break
                    # Check for Failure/Challenge
                    content = await page.content()
                    if "Just a moment" in content:
                        # Log only every other poll to keep the log readable.
                        if i % 2 == 0:
                            with open("error.log", "a") as f:
                                f.write(f"Browser: Still on Cloudflare challenge... ({i*5}s)\n")
                    else:
                        # Maybe it is loaded but selector didn't match yet?
                        pass
                    await asyncio.sleep(5)
                except Exception as e:
                    # Page may be mid-navigation; ignore and poll again.
                    pass
            if not authenticated:
                with open("error.log", "a") as f:
                    f.write("Browser: Timeout. Challenge NOT solved after 120s.\n")
                # Take a screenshot to debug what was on screen
                await page.screenshot(path="cloudflare_fail.png")
            # Extract cookies regardless, maybe we got lucky
            cookies = await context.cookies()
            cookie_dict = {c['name']: c['value'] for c in cookies}
            with open("error.log", "a") as f:
                f.write(f"Browser: Extracted {len(cookie_dict)} cookies.\n")
                if 'cf_clearance' in cookie_dict:
                    f.write("Browser: cf_clearance found.\n")
                else:
                    f.write("Browser: WARNING: cf_clearance NOT found.\n")
            return cookie_dict
    except Exception as e:
        logger.error(f"Browser authentication failed: {e}")
        with open("error.log", "a") as f:
            f.write(f"Browser auth failed exception: {e}\n")
        return {}
    finally:
        # NOTE(review): browser is never closed here — the async_playwright
        # context exiting tears it down on the success path; confirm the
        # failure path doesn't leak a browser process.
        pass

View file

@ -1,98 +0,0 @@
import logging
import asyncio
from typing import Optional, List, Dict
try:
# Try local import first (when running as app.main)
from .grok.grok import Grok
except ImportError:
try:
# Try absolute import (if running differently)
from app.grok.grok import Grok
except ImportError:
Grok = None
logger = logging.getLogger(__name__)
class GrokChatClient:
    """Async wrapper around the bundled Grok reverse-engineering client,
    with optional user-supplied cookies/UA and a browser-auth fallback."""

    def __init__(self):
        # None when the grok package (or one of its deps) is unavailable;
        # chat() then returns an error string instead of raising.
        self.client = None
        if Grok:
            try:
                self.client = Grok()
                logger.info("Grok API client initialized safely")
            except Exception as e:
                logger.error(f"Failed to initialize Grok client: {e}")
        else:
            logger.warning("grok-api library not found. Please install git+https://github.com/realasfngl/Grok-Api.git")

    async def chat(self, message: str, history: List[Dict[str, str]] = None, cookies: Dict[str, str] = None, user_agent: str = None) -> str:
        """
        Send a chat message to Grok.

        :param message: user prompt to send
        :param history: NOTE(review): accepted but never forwarded to the
            underlying client — confirm whether that is intentional.
        :param cookies: caller-provided grok.com cookies (e.g. cf_clearance)
        :param user_agent: optional User-Agent override for the session
        :return: Grok's reply text, or an "Error ..." string on failure
        """
        if not self.client:
            return "Error: Grok API not installed or initialized."
        try:
            # Apply user-provided cookies and UA
            if cookies:
                try:
                    self.client.session.cookies.update(cookies)
                except Exception as e:
                    logger.warning(f"Failed to update cookies: {e}")
            if user_agent:
                # Override the User-Agent header in the session
                self.client.session.headers["User-Agent"] = user_agent
                # Also set in the client's internal headers object if present
                # NOTE(review): the header templates use lowercase
                # "user-agent" keys, so setting "User-Agent" here may add a
                # second entry rather than replace the original — verify.
                if hasattr(self.client, 'headers'):
                    if hasattr(self.client.headers, 'LOAD'):
                        self.client.headers.LOAD["User-Agent"] = user_agent
                    if hasattr(self.client.headers, 'C_REQUEST'):
                        self.client.headers.C_REQUEST["User-Agent"] = user_agent
            # Use start_convo (sync method)
            try:
                # Run the blocking client off the event loop.
                result = await asyncio.to_thread(self.client.start_convo, message)
            except Exception as e:
                # If we catch inside the thread, it returns dict with error
                raise e
            if isinstance(result, dict):
                if "error" in result:
                    error_msg = result["error"]
                    # If we have cookies, we've already tried them.
                    # We can fallback to browser auth ONLY if no cookies were provided
                    # OR if the user wants us to try anyway.
                    if not cookies:
                        logger.warning(f"Grok request failed: {error_msg}. Attempting browser auth bypass...")
                        # Try to get cookies via browser
                        from .grok_auth import get_grok_cookies
                        browser_cookies = await get_grok_cookies()
                        if browser_cookies:
                            logger.info("Got cookies from browser, applying to Grok client...")
                            self.client.session.cookies.update(browser_cookies)
                            # Retry the request
                            result = await asyncio.to_thread(self.client.start_convo, message)
                            if isinstance(result, dict) and "error" in result:
                                raise Exception(f"Retry failed: {result['error']}")
                        else:
                            raise Exception(f"Browser auth failed, original error: {error_msg}")
                    else:
                        # If cookies were provided but failed, trust the error
                        logger.warning(f"User-provided cookies failed: {error_msg}")
                        raise Exception(f"Grok Error: {error_msg} (Check your cookies)")
                return result.get("response", "No response from Grok.")
            return str(result)
        except Exception as e:
            import traceback
            error_trace = traceback.format_exc()
            logger.error(f"Grok Chat Error: {e}\n{error_trace}")
            with open("error.log", "a") as f:
                f.write(f"\nFAILED GROK REQUEST: {e}\n{error_trace}")
            return f"Error communicating with Grok: {str(e)}"

View file

@ -1,355 +0,0 @@
"""
Meta AI FastAPI Service (v2.0)
Uses metaai-api library for Meta AI image generation.
See: https://github.com/mir-ashiq/metaai-api
"""
from contextlib import asynccontextmanager
from fastapi import FastAPI, BackgroundTasks, HTTPException
from fastapi.middleware.cors import CORSMiddleware
import asyncio
import uuid
from .models import (
GenerateRequest,
GenerateResponse,
ImageResult,
TaskStatusResponse,
HealthResponse,
GrokChatRequest,
GrokChatResponse,
VideoGenerateRequest,
VideoGenerateResponse,
VideoResult
)
from .grok_client import GrokChatClient
from .meta_crawler import meta_crawler
# Shared Grok chat client used by the /grok/chat endpoint.
grok_client = GrokChatClient()

# In-memory store for async generation jobs, keyed by task_id.
# NOTE(review): unbounded and lost on restart — acceptable for a
# single-process dev service; revisit before scaling out.
tasks: dict = {}
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Startup/shutdown hook for the FastAPI app.

    metaai-api needs no explicit warm-up, so this only logs the
    lifecycle transitions.
    """
    print("[MetaAI] Starting Meta AI service...")
    yield
    print("[MetaAI] Shutting down...")
app = FastAPI(
    title="Meta AI Image Generation Service",
    description="FastAPI wrapper for Meta AI image generation using metaai-api",
    version="2.0.0",
    lifespan=lifespan
)

# CORS middleware
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# fully permissive — fine if this service only ever binds to localhost behind
# the Next.js proxy, but revisit before exposing it publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/health", response_model=HealthResponse)
async def health_check():
"""Health check endpoint"""
return HealthResponse(
status="healthy",
version="2.0.0",
browser_ready=True # metaai-api handles this internally
)
@app.get("/rate-limit")
async def get_rate_limit():
"""Get current rate limiting status"""
return meta_crawler.get_rate_limit_status()
@app.post("/generate/sync", response_model=GenerateResponse)
async def generate_sync(request: GenerateRequest):
"""
Synchronous image generation - returns when complete.
Requires:
- prompt: The image generation prompt
- cookies: Facebook/Meta cookies (JSON array or string format)
"""
try:
images = await meta_crawler.generate_images(
prompt=request.prompt,
cookies=request.cookies,
num_images=request.num_images
)
return GenerateResponse(
success=True,
images=images,
error=None
)
except Exception as e:
return GenerateResponse(
success=False,
images=[],
error=str(e)
)
@app.post("/generate", response_model=GenerateResponse)
async def generate_async(request: GenerateRequest, background_tasks: BackgroundTasks):
"""
Async image generation - returns immediately with task_id.
Poll /status/{task_id} for results.
"""
task_id = str(uuid.uuid4())
tasks[task_id] = {
"status": "pending",
"images": [],
"error": None
}
async def run_generation():
try:
images = await meta_crawler.generate_images(
prompt=request.prompt,
cookies=request.cookies,
num_images=request.num_images
)
tasks[task_id] = {
"status": "completed",
"images": images,
"error": None
}
except Exception as e:
tasks[task_id] = {
"status": "failed",
"images": [],
"error": str(e)
}
# Run in background
background_tasks.add_task(asyncio.create_task, run_generation())
return GenerateResponse(
success=True,
images=[],
error=None,
task_id=task_id
)
@app.get("/status/{task_id}", response_model=TaskStatusResponse)
async def get_task_status(task_id: str):
"""Get status of async generation task"""
if task_id not in tasks:
raise HTTPException(status_code=404, detail="Task not found")
task = tasks[task_id]
return TaskStatusResponse(
task_id=task_id,
status=task["status"],
images=task["images"],
error=task["error"]
)
@app.delete("/status/{task_id}")
async def delete_task(task_id: str):
"""Clean up completed task"""
if task_id in tasks:
del tasks[task_id]
return {"deleted": True}
raise HTTPException(status_code=404, detail="Task not found")
@app.post("/grok/chat", response_model=GrokChatResponse)
async def grok_chat(request: GrokChatRequest):
"""
Chat with Grok AI
"""
try:
response = await grok_client.chat(request.message, request.history, request.cookies, request.user_agent)
return GrokChatResponse(response=response)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.post("/video/generate", response_model=VideoGenerateResponse)
async def generate_video(request: VideoGenerateRequest):
"""
Generate a video from a text prompt (and optionally an image) using Meta AI.
This uses the metaai_api library's video generation feature.
- Text-to-Video: Just provide a prompt
- Image-to-Video: Provide a prompt + image_base64
Video generation takes longer than image generation (30-60+ seconds).
Requires:
- prompt: The video generation prompt
- cookies: Facebook/Meta cookies (JSON array or string format)
- image_base64: Optional base64 image data for image-to-video
"""
import json
import asyncio
import base64
import requests as sync_requests
from concurrent.futures import ThreadPoolExecutor
try:
# Parse cookies to dict format for MetaAI
cookies_str = request.cookies.strip()
cookies_dict = {}
if cookies_str.startswith('['):
# JSON array format from Cookie Editor
parsed = json.loads(cookies_str)
if isinstance(parsed, list):
cookies_dict = {c['name']: c['value'] for c in parsed if 'name' in c and 'value' in c}
else:
# String format: "name1=value1; name2=value2"
for pair in cookies_str.split(';'):
pair = pair.strip()
if '=' in pair:
name, value = pair.split('=', 1)
cookies_dict[name.strip()] = value.strip()
if not cookies_dict:
return VideoGenerateResponse(
success=False,
videos=[],
error="No valid cookies provided"
)
# Handle image upload to Litterbox if image_base64 is provided
image_url = None
if request.image_base64:
print(f"[VideoGen] Uploading image to Litterbox for image-to-video...")
try:
# Extract base64 data (remove data:image/...;base64, prefix if present)
image_data = request.image_base64
if ',' in image_data:
image_data = image_data.split(',')[1]
# Decode base64 to bytes
image_bytes = base64.b64decode(image_data)
# Upload to Litterbox (temporary hosting, 1 hour expiry)
litterbox_url = "https://litterbox.catbox.moe/resources/internals/api.php"
files = {
'fileToUpload': ('image.png', image_bytes, 'image/png')
}
data = {
'reqtype': 'fileupload',
'time': '1h' # 1 hour expiry
}
upload_response = sync_requests.post(litterbox_url, files=files, data=data)
if upload_response.status_code == 200 and upload_response.text.startswith('http'):
image_url = upload_response.text.strip()
print(f"[VideoGen] Image uploaded to: {image_url}")
else:
print(f"[VideoGen] Litterbox upload failed: {upload_response.text}")
return VideoGenerateResponse(
success=False,
videos=[],
error=f"Failed to upload image: {upload_response.text[:200]}"
)
except Exception as upload_error:
print(f"[VideoGen] Image upload error: {str(upload_error)}")
return VideoGenerateResponse(
success=False,
videos=[],
error=f"Failed to upload image: {str(upload_error)}"
)
mode = "image-to-video" if image_url else "text-to-video"
print(f"[VideoGen] Starting {mode} for: '{request.prompt[:50]}...'")
# Import MetaAI and run video generation in thread pool (it's synchronous)
from metaai_api import MetaAI
def run_video_gen():
ai = MetaAI(cookies=cookies_dict)
if image_url:
# Image-to-video: Use prompt() with images parameter
result = ai.prompt(
message=request.prompt,
images=[image_url]
)
# Extract video URLs from media
video_urls = []
for media in result.get('media', []):
if media.get('type') == 'VIDEO' and media.get('url'):
video_urls.append(media['url'])
return {
'success': len(video_urls) > 0,
'video_urls': video_urls,
'message': result.get('message', '')
}
else:
# Text-to-video: Use generate_video()
return ai.generate_video(
prompt=request.prompt,
wait_before_poll=10,
max_attempts=60, # Up to 5 minutes of polling
wait_seconds=5,
verbose=True
)
# Run in thread pool since metaai_api is synchronous
loop = asyncio.get_event_loop()
with ThreadPoolExecutor(max_workers=1) as executor:
result = await loop.run_in_executor(executor, run_video_gen)
if not result.get('success', False):
error_msg = result.get('error') or result.get('message') or 'Video generation failed'
print(f"[VideoGen] Failed: {error_msg}")
return VideoGenerateResponse(
success=False,
videos=[],
error=error_msg
)
video_urls = result.get('video_urls', [])
print(f"[VideoGen] Success! Got {len(video_urls)} video(s)")
videos = [
VideoResult(
url=url,
prompt=request.prompt,
model="meta_video"
)
for url in video_urls
]
return VideoGenerateResponse(
success=True,
videos=videos,
conversation_id=result.get('conversation_id')
)
except Exception as e:
import traceback
traceback.print_exc()
print(f"[VideoGen] Error: {str(e)}")
return VideoGenerateResponse(
success=False,
videos=[],
error=str(e)
)

View file

@ -1,189 +0,0 @@
"""
Meta AI Wrapper - Lightweight wrapper around metaai-api library
Uses the mir-ashiq/metaai-api library for actual Meta AI interaction.
This wrapper adds rate limiting and adapts the response format for our API.
To update the library:
pip install -U git+https://github.com/mir-ashiq/metaai-api.git
"""
import asyncio
import json
import time
import random
from concurrent.futures import ThreadPoolExecutor
from typing import Optional
from metaai_api import MetaAI
from .config import settings
from .models import ImageResult
class RateLimiter:
    """Simple rate limiter to prevent shadowban.

    Tracks a rolling hourly request budget plus a minimum (jittered)
    delay between consecutive requests.
    """

    def __init__(self):
        self.last_request_time: float = 0
        self.request_count_hour: int = 0
        self.hour_start: float = time.time()

    async def wait_if_needed(self):
        """Sleep to honor the inter-request delay; raise if the hourly budget is spent."""
        now = time.time()
        # Roll the hourly window forward once it has fully elapsed.
        if now - self.hour_start > 3600:
            self.request_count_hour = 0
            self.hour_start = now
        # Refuse outright when the hourly budget is exhausted.
        if self.request_count_hour >= settings.max_requests_per_hour:
            remaining = 3600 - (now - self.hour_start)
            if remaining > 0:
                raise Exception(f"Hourly rate limit reached. Try again in {int(remaining)} seconds.")
        # Space requests out by the configured delay plus up to 5s of jitter.
        since_last = now - self.last_request_time
        required_gap = settings.rate_limit_delay + random.uniform(0, 5)
        if since_last < required_gap:
            await asyncio.sleep(required_gap - since_last)
        self.last_request_time = time.time()
        self.request_count_hour += 1

    def get_status(self) -> dict:
        """Get current rate limit status"""
        now = time.time()
        idle_for = now - self.last_request_time if self.last_request_time else 0
        reset_in = max(0, 3600 - (now - self.hour_start))
        return {
            "requests_this_hour": self.request_count_hour,
            "max_requests_per_hour": settings.max_requests_per_hour,
            "seconds_since_last_request": int(idle_for),
            "seconds_until_hour_reset": int(reset_in),
            "can_request_now": self.request_count_hour < settings.max_requests_per_hour,
        }
class MetaAICrawler:
    """
    Thin wrapper around metaai-api library.
    Handles:
    - Cookie format conversion (JSON array to dict)
    - Rate limiting
    - Response format adaptation
    The actual Meta AI interaction is delegated to metaai-api.
    """

    def __init__(self):
        self.rate_limiter = RateLimiter()
        # Small pool: requests are rate-limited anyway, so 2 workers suffice.
        self._executor = ThreadPoolExecutor(max_workers=2)

    def _parse_cookies(self, cookies: str) -> dict:
        """Convert cookies from various formats to dict.

        Accepts a Cookie-Editor JSON array ('[{"name": ..., "value": ...}]')
        or a raw header string ("name1=value1; name2=value2"). Returns {}
        for empty input or input with no parseable pairs.
        """
        if not cookies:
            return {}
        # Try JSON array format first
        try:
            cookies_str = cookies.strip()
            if cookies_str.startswith('['):
                parsed = json.loads(cookies_str)
                if isinstance(parsed, list):
                    return {c['name']: c['value'] for c in parsed if 'name' in c and 'value' in c}
        except json.JSONDecodeError:
            pass
        # Fall back to cookie string format: "name1=value1; name2=value2"
        result = {}
        for pair in cookies.split(';'):
            pair = pair.strip()
            if '=' in pair:
                name, value = pair.split('=', 1)
                result[name.strip()] = value.strip()
        return result

    def _generate_sync(self, prompt: str, cookies_dict: dict) -> dict:
        """Synchronous generation using metaai-api (runs inside the executor)."""
        ai = MetaAI(cookies=cookies_dict)
        return ai.prompt(prompt)

    async def generate_images(self, prompt: str, cookies: str, num_images: int = 4) -> list[ImageResult]:
        """
        Generate images using Meta AI's Imagine model.

        Args:
            prompt: The image generation prompt
            cookies: Meta AI/Facebook cookies (JSON array or string format)
            num_images: Number of images (metaai-api returns 4 by default)

        Returns:
            List of ImageResult objects with generated image URLs

        Raises:
            Exception: on missing cookies, rate-limit exhaustion, or an
                empty/error response from Meta AI.
        """
        # Rate limiting (may sleep; raises if the hourly budget is spent)
        await self.rate_limiter.wait_if_needed()
        print(f"[MetaCrawler] Generating images for: '{prompt[:50]}...'")
        # Parse cookies
        cookies_dict = self._parse_cookies(cookies)
        if not cookies_dict:
            raise Exception("No valid cookies provided")
        # Check for essential cookies
        if 'c_user' not in cookies_dict and 'xs' not in cookies_dict:
            print("[MetaCrawler] Warning: Missing Facebook auth cookies (c_user, xs)")
        # Prepare prompt (add "Imagine" prefix if not present)
        image_prompt = prompt if prompt.lower().startswith('imagine') else f"Imagine {prompt}"
        # Run in thread pool since metaai_api is synchronous.
        # BUG FIX: asyncio.get_event_loop() is deprecated inside coroutines
        # (Python 3.10+); get_running_loop() is the correct call here.
        loop = asyncio.get_running_loop()
        try:
            result = await loop.run_in_executor(
                self._executor,
                self._generate_sync,
                image_prompt,
                cookies_dict
            )
        except Exception as e:
            print(f"[MetaCrawler] Error: {str(e)}")
            raise
        # Extract media from response
        media = result.get('media', [])
        if not media:
            message = result.get('message', '')
            if message:
                raise Exception(f"Meta AI response: {message[:200]}")
            raise Exception("No images generated")
        print(f"[MetaCrawler] Got {len(media)} images!")
        # Convert to ImageResult format
        images = []
        for item in media:
            if item.get('type') == 'IMAGE' and item.get('url'):
                images.append(ImageResult(
                    url=item['url'],
                    prompt=item.get('prompt', prompt),
                    model="imagine"
                ))
        return images[:num_images]  # Limit to requested count

    def get_rate_limit_status(self) -> dict:
        """Get current rate limiting status"""
        return self.rate_limiter.get_status()
# Module-level singleton shared by the FastAPI routes.
meta_crawler = MetaAICrawler()

View file

@ -1,88 +0,0 @@
"""
Pydantic models for request/response schemas
"""
from pydantic import BaseModel, Field
from typing import Optional
from enum import Enum
class TaskStatus(str, Enum):
    """Lifecycle states of an async image-generation task."""
    PENDING = "pending"
    PROCESSING = "processing"
    COMPLETED = "completed"
    FAILED = "failed"
class GenerateRequest(BaseModel):
    """Request model for image generation"""
    prompt: str = Field(..., description="Image generation prompt", min_length=1)
    # Raw cookie payload; accepted as a Cookie-Editor JSON array or a
    # "name=value; ..." string (parsing happens downstream in the crawler).
    cookies: str = Field(..., description="Meta AI session cookies")
    num_images: int = Field(default=4, ge=1, le=8, description="Number of images to generate")
class GrokChatRequest(BaseModel):
    """Request model for Grok chat"""
    message: str = Field(..., description="Message content")
    # Prior chat turns; currently accepted but not forwarded by the client.
    history: Optional[list] = Field(default=None, description="Chat history")
    cookies: Optional[dict] = Field(default=None, description="Grok session cookies")
    user_agent: Optional[str] = Field(default=None, description="Browser User-Agent")
class ImageResult(BaseModel):
    """Single generated image result"""
    url: str
    data: Optional[str] = None  # base64 encoded image data
    prompt: str
    model: str = "imagine"
class GenerateResponse(BaseModel):
    """Response model for image generation.

    Errors are reported in-band: success=False with an error string.
    task_id is set only by the async /generate endpoint.
    """
    success: bool
    images: list[ImageResult] = []
    error: Optional[str] = None
    task_id: Optional[str] = None
class GrokChatResponse(BaseModel):
    """Response model for Grok chat"""
    response: str
    error: Optional[str] = None
class TaskStatusResponse(BaseModel):
    """Response model for async task status"""
    task_id: str
    # Pydantic coerces the plain strings stored in the task dict into TaskStatus.
    status: TaskStatus
    images: list[ImageResult] = []
    error: Optional[str] = None
    progress: Optional[int] = None  # 0-100
class HealthResponse(BaseModel):
    """Health check response"""
    status: str = "healthy"
    # Default is stale; the /health endpoint overrides this with the live version.
    version: str = "1.0.0"
    browser_ready: bool = True
class VideoGenerateRequest(BaseModel):
    """Request model for video generation"""
    prompt: str = Field(..., description="Video generation prompt", min_length=1)
    cookies: str = Field(..., description="Meta AI session cookies")
    # When present, switches the endpoint from text-to-video to image-to-video;
    # a "data:image/...;base64," prefix is tolerated and stripped downstream.
    image_base64: Optional[str] = Field(default=None, description="Base64 image data for image-to-video (optional)")
class VideoResult(BaseModel):
    """Single generated video result"""
    url: str
    prompt: str
    model: str = "meta_video"
class VideoGenerateResponse(BaseModel):
    """Response model for video generation.

    Errors are reported in-band: success=False with an error string.
    """
    success: bool
    videos: list[VideoResult] = []
    error: Optional[str] = None
    conversation_id: Optional[str] = None

View file

@ -1,24 +0,0 @@
# Crawl4AI Meta AI Service Dependencies
# Using metaai-api library for Meta AI integration
# Update: pip install -U git+https://github.com/mir-ashiq/metaai-api.git
# Core web framework
fastapi>=0.109.0
uvicorn[standard]>=0.27.0
pydantic>=2.0.0
# Meta AI API library (from GitHub for latest updates)
# Install separately: pip install git+https://github.com/mir-ashiq/metaai-api.git
# Or add to Docker: RUN pip install git+https://github.com/mir-ashiq/metaai-api.git
# Dependencies for metaai-api
requests-html>=0.10.0
lxml_html_clean>=0.4.0
# Grok Chat API
# Install: pip install git+https://github.com/realasfngl/Grok-Api.git
# Or add to Docker: RUN pip install git+https://github.com/realasfngl/Grok-Api.git
# Browser Automation (for Cloudflare bypass)
playwright>=1.41.0
playwright-stealth>=1.0.6

1
services/metaai-api Submodule

@ -0,0 +1 @@
Subproject commit 8f4ac67c01703e0c0e0c2b1cfd70a6d9b53fc9a8