This commit is contained in:
phamhungd 2025-11-28 12:43:39 +07:00
parent 5fb1a44e9f
commit f1cd681398
6 changed files with 492 additions and 115 deletions

View file

@ -21,3 +21,13 @@ aPix Image Workspace là một giao diện Flask nhẹ giúp bạn tạo hình
1. Đặt biến môi trường `GOOGLE_API_KEY` với API key của Google GenAI hoặc nhập trực tiếp trong giao diện.
2. Mở trình duyệt tới `http://127.0.0.1:8888`, nhập prompt, chọn tùy chọn và nhấn Generate.
3. Hình ảnh: `static/generated` lưu nội dung mới nhất, còn `/gallery` trả về URL cho phần lịch sử.
### Cú pháp đặc biệt
Ứng dụng hỗ trợ cú pháp placeholder để tạo nhiều biến thể ảnh hoặc thay thế nội dung linh hoạt:
* **Placeholder:** Sử dụng `{text}` hoặc `[text]` trong prompt. Ví dụ: `A photo of a {animal} in the style of {style}`.
* **Trường Note:** Nội dung trong trường Note sẽ thay thế cho placeholder:
* **Thay thế đơn:** Nếu Note là `cat`, prompt sẽ thành `A photo of a cat...`.
* **Hàng đợi (Queue):** Nếu Note chứa ký tự `|` (ví dụ: `cat|dog|bird`), ứng dụng sẽ tự động tạo 3 ảnh lần lượt với `cat`, `dog`, và `bird`.
* **Nhiều dòng:** Nếu Note có nhiều dòng, mỗi dòng sẽ ứng với một lần tạo ảnh.
* **Mặc định:** Nếu Note để trống, placeholder sẽ giữ nguyên hoặc dùng giá trị mặc định nếu có (ví dụ `{cat|dog}` sẽ tạo 2 ảnh nếu Note trống).

174
app.py
View file

@ -16,7 +16,7 @@ import logging
app = Flask(__name__)
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
log.setLevel(logging.WARNING)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
PREVIEW_MAX_DIMENSION = 1024
@ -192,6 +192,132 @@ os.makedirs(GENERATED_DIR, exist_ok=True)
UPLOADS_DIR = os.path.join(app.static_folder, 'uploads')
os.makedirs(UPLOADS_DIR, exist_ok=True)
def process_prompt_with_placeholders(prompt, note):
    """
    Expand a prompt containing ``{text}`` or ``[text]`` placeholders.

    Behavior:
      * No placeholders: return ``["{prompt}. {note}"]`` (or just the prompt
        when the note is empty).
      * Placeholders, empty note: the FIRST placeholder containing ``|``
        (e.g. ``{cat|dog}``) fans out into one prompt per option; with no
        piped placeholder the prompt is returned unchanged.
      * Placeholders, note containing ``|``: each pipe-separated note segment
        yields one prompt with every placeholder replaced by that segment
        (queue mode).
      * Placeholders, multi-line note: note lines map onto placeholders
        left-to-right; leftover placeholders fall back to their default text
        (first pipe option if piped, otherwise the raw inner text).
      * Placeholders, single-line note: every placeholder is replaced by the
        whole note.

    Args:
        prompt: Prompt template, possibly containing placeholders.
        note: User note used to fill placeholders (may be empty).

    Returns:
        list: One or more fully processed prompt strings.
    """
    import re

    def _replace(placeholder, replacement, text, count=0):
        # Match the placeholder content wrapped in either {braces} or [brackets].
        escaped = re.escape(placeholder)
        pattern = f'(\\{{{escaped}\\}}|\\[{escaped}\\])'
        # BUG FIX: pass the replacement through a callable so backslashes and
        # group references in user text (e.g. "C:\path" or "\1") are inserted
        # literally instead of being interpreted by re.sub()'s repl syntax.
        return re.sub(pattern, lambda _m: replacement, text, count=count)

    # findall with two alternative groups returns tuples like ('content', '')
    # or ('', 'content'); keep whichever side actually matched.
    matches = re.findall(r'\{([^{}]+)\}|\[([^\[\]]+)\]', prompt)
    placeholders = [a or b for a, b in matches if a or b]

    if not placeholders:
        # Standard behavior: append the note to the prompt.
        return [f"{prompt}. {note}" if note else prompt]

    if not note:
        # Empty note: fan out on the FIRST piped placeholder only (kept simple
        # on purpose; any other placeholders are left untouched).
        for p in placeholders:
            if '|' in p:
                return [_replace(p, option.strip(), prompt) for option in p.split('|')]
        # No piped defaults: return the prompt with its placeholders intact.
        return [prompt]

    if '|' in note:
        # Queue mode: one prompt per pipe-separated note segment, with every
        # placeholder replaced by that segment.
        segments = [s.strip() for s in note.split('|') if s.strip()]
        generated_prompts = []
        for segment in segments:
            current = prompt
            for p in placeholders:
                current = _replace(p, segment, current)
            generated_prompts.append(current)
        return generated_prompts

    if '\n' in note:
        # Sequential mode: map non-empty note lines onto placeholders in order.
        lines = [l.strip() for l in note.split('\n') if l.strip()]
        current = prompt
        for i, p in enumerate(placeholders):
            if i < len(lines):
                replacement = lines[i]
            elif '|' in p:
                # Fewer lines than placeholders: use the first default option.
                replacement = p.split('|')[0]
            else:
                # No default available: keep the placeholder's inner text.
                replacement = p
            # count=1 so identically-named placeholders map sequentially
            # left-to-right instead of all being hit by the same line.
            current = _replace(p, replacement, current, count=1)
        return [current]

    # Single-line note: replace every placeholder with the whole note.
    current = prompt
    for p in placeholders:
        current = _replace(p, note, current)
    return [current]
@app.route('/')
def index():
"""Serve the single-page UI shell (all interaction happens client-side)."""
return render_template('index.html')
@ -206,6 +332,7 @@ def generate_image():
note = form.get('note', '')
aspect_ratio = form.get('aspect_ratio')
resolution = form.get('resolution', '2K')
model = form.get('model', 'gemini-3-pro-image-preview')
api_key = form.get('api_key') or os.environ.get('GOOGLE_API_KEY')
reference_files = request.files.getlist('reference_images')
reference_paths_json = form.get('reference_image_paths')
@ -215,6 +342,7 @@ def generate_image():
note = data.get('note', '')
aspect_ratio = data.get('aspect_ratio')
resolution = data.get('resolution', '2K')
model = data.get('model', 'gemini-3-pro-image-preview')
api_key = data.get('api_key') or os.environ.get('GOOGLE_API_KEY')
reference_files = []
reference_paths_json = data.get('reference_image_paths')
@ -229,9 +357,11 @@ def generate_image():
print("Đang gửi lệnh...", flush=True)
client = genai.Client(api_key=api_key)
image_config_args = {
"image_size": resolution
}
image_config_args = {}
# Only add resolution if NOT using flash model
if model != 'gemini-2.5-flash-image':
image_config_args["image_size"] = resolution
if aspect_ratio and aspect_ratio != 'Auto':
image_config_args["aspect_ratio"] = aspect_ratio
@ -239,8 +369,25 @@ def generate_image():
# Process reference paths and files
final_reference_paths = []
# Merge prompt with note for API call, but keep originals for metadata
api_prompt = f"{prompt}. {note}" if note else prompt
# Process prompt with placeholders - returns list of prompts
processed_prompts = process_prompt_with_placeholders(prompt, note)
# If multiple prompts (queue scenario), return them to frontend for queue processing
if len(processed_prompts) > 1:
return jsonify({
'queue': True,
'prompts': processed_prompts,
'metadata': {
'original_prompt': prompt,
'original_note': note,
'aspect_ratio': aspect_ratio or 'Auto',
'resolution': resolution,
'model': model
}
})
# Single prompt - continue with normal generation
api_prompt = processed_prompts[0]
contents = [api_prompt]
# Parse reference paths from frontend
@ -323,8 +470,8 @@ def generate_image():
print(f"Error processing uploaded file: {e}")
continue
model_name = "gemini-3-pro-image-preview"
print("Đang tạo...", flush=True)
model_name = model
print(f"Đang tạo với model {model_name}...", flush=True)
response = client.models.generate_content(
model=model_name,
contents=contents,
@ -366,8 +513,8 @@ def generate_image():
image_url = url_for('static', filename=rel_path)
metadata = {
'prompt': prompt,
'note': note,
'prompt': api_prompt, # Store the processed prompt
'note': '', # Note is already merged into prompt
'aspect_ratio': aspect_ratio or 'Auto',
'resolution': resolution,
'reference_images': final_reference_paths,
@ -942,5 +1089,10 @@ def refine_prompt():
except Exception as e:
return jsonify({'error': str(e)}), 500
port_sever = 8888
if __name__ == '__main__':
app.run(debug=True, port=8888)
# Use ANSI green text so the startup banner stands out in terminals
print("\033[32m" + "aPix Image Workspace running at:" + "\033[0m", flush=True)
print("\033[32m" + f"http://localhost:{port_sever}" + " " + "\033[0m", flush=True)
print("\033[32m" + f"http://127.0.0.1:{port_sever}" + "\033[0m", flush=True)
app.run(debug=True, port=port_sever)

View file

@ -305,5 +305,43 @@ export function createGallery({ galleryGrid, onSelect }) {
return showOnlyFavorites;
}
// Move the active gallery selection one item backward/forward.
function navigate(direction) {
    const active = galleryGrid.querySelector('.gallery-item.active');

    // Nothing selected yet: any arrow key activates the first item.
    if (!active) {
        const first = galleryGrid.querySelector('.gallery-item');
        if (first) {
            first.click();
            first.scrollIntoView({ behavior: 'smooth', block: 'nearest' });
        }
        return;
    }

    const target = direction === 'prev'
        ? active.previousElementSibling
        : direction === 'next'
            ? active.nextElementSibling
            : null;

    if (target && target.classList.contains('gallery-item')) {
        target.click();
        target.scrollIntoView({ behavior: 'smooth', block: 'nearest' });
    }
}

// Setup keyboard navigation
document.addEventListener('keydown', (e) => {
    // Don't hijack arrow keys while the user is typing in a form field.
    const tag = e.target.tagName;
    if (tag === 'INPUT' || tag === 'TEXTAREA') return;
    if (e.key === 'ArrowLeft') navigate('prev');
    else if (e.key === 'ArrowRight') navigate('next');
});
return { load, setFilter, getCurrentFilter, setSearch, getSearchQuery, toggleFavorites, isFavoritesActive };
}

View file

@ -48,6 +48,16 @@ const docsContent = {
'API key và prompt được lưu để lần sau không phải nhập lại',
],
},
{
heading: 'Cú pháp đặc biệt',
items: [
'Placeholder: Dùng {text} hoặc [text] trong prompt (VD: A photo of a {animal})',
'Note đơn: Nội dung Note sẽ thay thế cho placeholder',
'Note hàng đợi: Dùng dấu | để tạo nhiều ảnh (VD: cat|dog|bird)',
'Note nhiều dòng: Mỗi dòng tương ứng một lần tạo ảnh',
'Mặc định: Nếu Note trống, dùng giá trị trong ngoặc (VD: {cat|dog} tạo 2 ảnh)',
],
},
],
};
@ -98,6 +108,97 @@ document.addEventListener('DOMContentLoaded', () => {
const refineInstructionInput = document.getElementById('refine-instruction');
const confirmRefineBtn = document.getElementById('confirm-refine-btn');
// --- Helper Functions (Moved to top to avoid hoisting issues) ---
// Model Selection Logic
const apiModelSelect = document.getElementById('api-model');
const resolutionGroup = resolutionInput.closest('.input-group');

// The flash model has no resolution option, so hide that control for it.
function toggleResolutionVisibility() {
    const isFlash = apiModelSelect && apiModelSelect.value === 'gemini-2.5-flash-image';
    resolutionGroup.classList.toggle('hidden', Boolean(isFlash));
}

if (apiModelSelect) {
    apiModelSelect.addEventListener('change', () => {
        toggleResolutionVisibility();
        persistSettings();
    });
}
// Load Settings
// Restore persisted UI state (API key, prompt, note, ratio, resolution,
// model) from localStorage; returns the parsed settings object, or {}.
function loadSettings() {
    try {
        const raw = localStorage.getItem(SETTINGS_STORAGE_KEY);
        if (raw) {
            const settings = JSON.parse(raw);
            [
                [settings.apiKey, apiKeyInput],
                [settings.prompt, promptInput],
                [settings.note, promptNoteInput],
                [settings.aspectRatio, aspectRatioInput],
                [settings.resolution, resolutionInput],
            ].forEach(([value, input]) => {
                if (value) input.value = value;
            });
            if (settings.model && apiModelSelect) {
                apiModelSelect.value = settings.model;
                toggleResolutionVisibility();
            }
            return settings;
        }
    } catch (e) {
        console.warn('Failed to load settings', e);
    }
    return {};
}
// Snapshot the current UI state into localStorage so it survives reloads.
function persistSettings() {
    // slotManager may not be initialized yet during early page setup.
    const referenceImages = (typeof slotManager !== 'undefined') ? slotManager.getImages() : [];
    const payload = {
        apiKey: apiKeyInput.value,
        prompt: promptInput.value,
        note: promptNoteInput.value,
        aspectRatio: aspectRatioInput.value,
        resolution: resolutionInput.value,
        model: apiModelSelect ? apiModelSelect.value : 'gemini-3-pro-image-preview',
        referenceImages: referenceImages,
    };
    try {
        localStorage.setItem(SETTINGS_STORAGE_KEY, JSON.stringify(payload));
    } catch (e) {
        console.warn('Failed to save settings', e);
    }
}
// Helper to build form data for generation
// Assembles the multipart payload for POST /generate from explicit fields
// plus the reference images currently held by slotManager.
function buildGenerateFormData({ prompt, note, aspect_ratio, resolution, api_key, model }) {
    const selectedModel = model || (apiModelSelect ? apiModelSelect.value : 'gemini-3-pro-image-preview');
    const formData = new FormData();

    // Append scalar fields in a fixed order (order matches the old builder).
    const fields = { prompt, note, aspect_ratio, resolution, api_key, model: selectedModel };
    Object.entries(fields).forEach(([key, value]) => formData.append(key, value));

    // Raw files selected in this session…
    slotManager.getReferenceFiles().forEach((file) => {
        formData.append('reference_images', file);
    });

    // …plus paths of images already uploaded to the server, as JSON.
    const referencePaths = slotManager.getReferencePaths();
    if (referencePaths && referencePaths.length > 0) {
        formData.append('reference_image_paths', JSON.stringify(referencePaths));
    }
    return formData;
}
// --- End Helper Functions ---
let zoomLevel = 1;
let panOffset = { x: 0, y: 0 };
let isPanning = false;
@ -202,15 +303,31 @@ document.addEventListener('DOMContentLoaded', () => {
let generationQueue = [];
let isProcessingQueue = false;
let pendingRequests = 0; // Track requests waiting for backend response
function updateQueueCounter() {
// Count includes current processing item + items in queue
const count = generationQueue.length + (isProcessingQueue ? 1 : 0);
// Count includes:
// 1. Items waiting in queue
// 2. Item currently being processed (isProcessingQueue)
// 3. Items waiting for backend response (pendingRequests)
const count = generationQueue.length + (isProcessingQueue ? 1 : 0) + pendingRequests;
console.log('Queue counter update:', {
queue: generationQueue.length,
processing: isProcessingQueue,
pending: pendingRequests,
total: count
});
if (count > 0) {
queueCounter.classList.remove('hidden');
queueCountText.textContent = count;
if (queueCounter) {
queueCounter.classList.remove('hidden');
queueCountText.textContent = count;
}
} else {
queueCounter.classList.add('hidden');
if (queueCounter) {
queueCounter.classList.add('hidden');
}
}
}
@ -224,35 +341,58 @@ document.addEventListener('DOMContentLoaded', () => {
// Take task from queue FIRST, then update state
const task = generationQueue.shift();
isProcessingQueue = true;
updateQueueCounter();
updateQueueCounter(); // Show counter immediately
try {
setViewState('loading');
const formData = buildGenerateFormData({
prompt: task.prompt,
note: task.note || '',
aspect_ratio: task.aspectRatio,
resolution: task.resolution,
api_key: task.apiKey,
});
const response = await fetch('/generate', {
method: 'POST',
body: formData,
});
const data = await response.json();
if (!response.ok) {
throw new Error(data.error || 'Failed to generate image');
}
if (data.image) {
displayImage(data.image, data.image_data);
// Check if this task already has a result (immediate generation)
if (task.immediateResult) {
// Display the already-generated image
displayImage(task.immediateResult.image, task.immediateResult.image_data);
gallery.load();
} else {
throw new Error('No image data received');
// Need to generate the image
const formData = buildGenerateFormData({
prompt: task.prompt,
note: task.note || '',
aspect_ratio: task.aspectRatio,
resolution: task.resolution,
api_key: task.apiKey,
model: task.model,
});
const response = await fetch('/generate', {
method: 'POST',
body: formData,
});
const data = await response.json();
if (!response.ok) {
throw new Error(data.error || 'Failed to generate image');
}
if (data.image) {
displayImage(data.image, data.image_data);
gallery.load();
} else if (data.queue && data.prompts && Array.isArray(data.prompts)) {
// Backend returned more items - add them to queue
console.log('Backend returned additional queue items:', data.prompts.length);
data.prompts.forEach(processedPrompt => {
generationQueue.push({
prompt: processedPrompt,
note: '',
aspectRatio: task.aspectRatio,
resolution: task.resolution,
apiKey: task.apiKey,
model: task.model,
});
});
updateQueueCounter();
} else {
throw new Error('No image data received');
}
}
} catch (error) {
showError(error.message);
@ -264,12 +404,13 @@ document.addEventListener('DOMContentLoaded', () => {
}
}
function addToQueue() {
async function addToQueue() {
const prompt = promptInput.value.trim();
const note = promptNoteInput.value.trim();
const aspectRatio = aspectRatioInput.value;
const resolution = resolutionInput.value;
const apiKey = apiKeyInput.value.trim();
const selectedModel = apiModelSelect?.value || 'gemini-3-pro-image-preview';
if (!apiKey) {
openApiSettings();
@ -281,19 +422,96 @@ document.addEventListener('DOMContentLoaded', () => {
return;
}
// Store original prompt and note separately
generationQueue.push({
prompt: prompt,
note: note,
aspectRatio,
resolution,
apiKey
});
// Show loading state if not already processing and this is the first request
if (!isProcessingQueue && pendingRequests === 0) {
setViewState('loading');
}
// Increment pending requests and update counter immediately
pendingRequests++;
updateQueueCounter();
if (!isProcessingQueue) {
processNextInQueue();
let fetchCompleted = false;
try {
const formData = buildGenerateFormData({
prompt: prompt,
note: note,
aspect_ratio: aspectRatio,
resolution: resolution,
api_key: apiKey,
model: selectedModel,
});
const response = await fetch('/generate', {
method: 'POST',
body: formData,
});
const data = await response.json();
// Mark fetch as completed and decrement pending
// We do this BEFORE adding to queue to avoid double counting
fetchCompleted = true;
pendingRequests--;
if (!response.ok) {
throw new Error(data.error || 'Failed to generate image');
}
// Check if backend returned a queue
if (data.queue && data.prompts && Array.isArray(data.prompts)) {
console.log('Backend returned queue with', data.prompts.length, 'prompts');
// Add all prompts to the queue
data.prompts.forEach(processedPrompt => {
generationQueue.push({
prompt: processedPrompt,
note: '',
aspectRatio,
resolution,
apiKey,
model: selectedModel,
});
});
} else if (data.image) {
console.log('Backend returned single image');
// Single image - add to queue for consistent processing
generationQueue.push({
prompt: prompt,
note: note,
aspectRatio,
resolution,
apiKey,
model: selectedModel,
immediateResult: {
image: data.image,
image_data: data.image_data
}
});
} else {
throw new Error('Unexpected response from server');
}
// Update counter after adding to queue
updateQueueCounter();
// Start processing queue only if not already processing
if (!isProcessingQueue) {
console.log('Starting queue processing');
processNextInQueue();
} else {
console.log('Already processing, item added to queue');
}
} catch (error) {
console.error('Error in addToQueue:', error);
// If fetch failed (didn't complete), we need to decrement pendingRequests
if (!fetchCompleted) {
pendingRequests--;
}
updateQueueCounter();
showError(error.message);
}
}
@ -1323,7 +1541,14 @@ document.addEventListener('DOMContentLoaded', () => {
function applyMetadata(metadata) {
if (!metadata) return;
if (metadata.prompt) promptInput.value = metadata.prompt;
if (metadata.note) promptNoteInput.value = metadata.note;
// If metadata doesn't have 'note' field, set to empty string instead of keeping current value
if (metadata.hasOwnProperty('note')) {
promptNoteInput.value = metadata.note || '';
} else {
promptNoteInput.value = '';
}
if (metadata.aspect_ratio) aspectRatioInput.value = metadata.aspect_ratio;
if (metadata.resolution) resolutionInput.value = metadata.resolution;
@ -1381,62 +1606,6 @@ document.addEventListener('DOMContentLoaded', () => {
}
}
// NOTE(review): removal side of this diff — the generic field-loop builder
// that this commit replaces with an explicit-destructuring version.
// Builds a multipart body: copies every non-null field, then attaches
// reference image files and any server-side upload paths as JSON.
function buildGenerateFormData(fields) {
const formData = new FormData();
Object.entries(fields).forEach(([key, value]) => {
if (value !== undefined && value !== null) {
formData.append(key, value);
}
});
slotManager.getReferenceFiles().forEach(file => {
formData.append('reference_images', file, file.name);
});
const referencePaths = slotManager.getReferencePaths();
if (referencePaths && referencePaths.length > 0) {
formData.append('reference_image_paths', JSON.stringify(referencePaths));
}
return formData;
}
// NOTE(review): removal side of this diff — the pre-refactor settings loader
// (no model field; note key was `promptNote`).
// Restores cached inputs from localStorage and returns the parsed settings.
function loadSettings() {
if (typeof localStorage === 'undefined') return {};
try {
const saved = localStorage.getItem(SETTINGS_STORAGE_KEY);
if (!saved) return {};
const { apiKey, aspectRatio, resolution, prompt, promptNote, referenceImages } = JSON.parse(saved);
if (apiKey) apiKeyInput.value = apiKey;
if (aspectRatio) aspectRatioInput.value = aspectRatio;
if (resolution) resolutionInput.value = resolution;
if (prompt) promptInput.value = prompt;
if (promptNote) promptNoteInput.value = promptNote;
return { apiKey, aspectRatio, resolution, prompt, promptNote, referenceImages };
} catch (error) {
console.warn('Unable to load cached settings', error);
return {};
}
}
// NOTE(review): removal side of this diff — the pre-refactor settings saver
// (trims values; serializes reference images via slotManager).
// Persists current form state into localStorage; failures are only warned.
function persistSettings() {
if (typeof localStorage === 'undefined') return;
try {
const settings = {
apiKey: apiKeyInput.value.trim(),
aspectRatio: aspectRatioInput.value,
resolution: resolutionInput.value,
prompt: promptInput.value.trim(),
promptNote: promptNoteInput.value.trim(),
referenceImages: slotManager.serializeReferenceImages(),
};
localStorage.setItem(SETTINGS_STORAGE_KEY, JSON.stringify(settings));
} catch (error) {
console.warn('Unable to persist settings', error);
}
}
function handleGenerateShortcut(event) {
if ((event.ctrlKey || event.metaKey) && event.key === 'Enter') {

View file

@ -1779,22 +1779,20 @@ button#generate-btn:disabled {
align-items: center;
gap: 0.5rem;
padding: 0.5rem 0.75rem;
background: rgba(0, 0, 0, 0.6);
background: rgba(0, 0, 0, 0.8);
backdrop-filter: blur(10px);
border: 1px solid rgba(255, 255, 255, 0.1);
border: 1px solid rgba(255, 255, 255, 0.2);
border-radius: 20px;
color: var(--text-primary);
font-size: 0.875rem;
font-weight: 500;
z-index: 100;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2);
font-weight: 600;
z-index: 9999;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.4);
transition: all 0.3s ease;
}
.queue-counter.hidden {
opacity: 0;
transform: translateY(-10px);
pointer-events: none;
display: none;
}
.queue-icon {

View file

@ -410,6 +410,16 @@
rel="noreferrer">aistudio.google.com/api-keys</a>
</p>
</div>
<div class="input-group api-settings-input-group">
<label for="api-model">Model</label>
<div class="select-wrapper">
<select id="api-model"
style="width: 100%; padding: 0.75rem; background: rgba(255, 255, 255, 0.05); border: 1px solid rgba(255, 255, 255, 0.1); border-radius: 0.5rem; color: var(--text-primary); font-size: 0.9rem;">
<option value="gemini-3-pro-image-preview">Gemini 3 Pro (Image Preview)</option>
<option value="gemini-2.5-flash-image">Gemini 2.5 Flash Image</option>
</select>
</div>
</div>
<div class="controls-footer" style="justify-content: flex-end; margin-top: 0.5rem;">
<button id="save-api-settings-btn">
<span>Đóng</span>