fix: vision processor arguments and add nas compose file

This commit is contained in:
SysVis AI 2025-12-28 20:09:59 +07:00
parent d74ae097af
commit ae205ad37a
2 changed files with 158 additions and 1 deletions

157
docker-compose.nas.yml Normal file
View file

@@ -0,0 +1,157 @@
version: '3.8'

services:
  # -------------------------------------------------------------------
  # KV Graph (System Architecture Visualizer)
  # -------------------------------------------------------------------
  kv-graph:
    image: vndangkhoa/sys-arc-visl:latest
    container_name: kv-graph-web
    restart: always
    ports:
      - "8338:80"
    networks:
      - syno-global-net
    depends_on:
      - ollama

  # -------------------------------------------------------------------
  # OLLAMA Language Model Server (GPU)
  # -------------------------------------------------------------------
  ollama:
    container_name: OLLAMA
    image: ollama/ollama:latest
    volumes:
      - /volume2/docker/ollama/data:/root/.ollama:rw
    environment:
      # --- Server Config ---
      OLLAMA_HOST: "0.0.0.0"
      # [CRITICAL] Allows the KV-Graph browser app to connect via the Nginx Proxy
      OLLAMA_ORIGINS: "*"
      # Keep model loaded in VRAM (faster response; -1 = infinite).
      # Quoted so the YAML parser passes it through as the string "-1".
      OLLAMA_KEEP_ALIVE: "-1"
      # Note: 'OLLAMA_INSTALL_MODELS' is not supported by the official image.
      # You must run this command *once* after starting:
      #   docker exec -it OLLAMA ollama pull llama3.2
    ports:
      - "11434:11434"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    healthcheck:
      # NOTE(review): `ollama --version` only proves the binary runs, not that
      # the API server is accepting connections — consider probing the port.
      test: ["CMD", "ollama", "--version"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s
    restart: on-failure:5
    networks:
      - ai-network
      - syno-global-net

  # -------------------------------------------------------------------
  # Open WebUI (Interface for Ollama)
  # -------------------------------------------------------------------
  webui:
    container_name: OLLAMA-WEBUI
    image: ghcr.io/open-webui/open-webui:latest
    volumes:
      - /volume2/docker/ollama/webui:/app/backend/data:rw
      - /volume2/docker/ollama/upload:/app/backend/data/uploads:rw
    environment:
      OLLAMA_BASE_URL: http://ollama:11434
      # NOTE(review): plaintext credentials committed to VCS — move these to an
      # .env file or a secret store and rotate the exposed values.
      OLLAMA_API_KEY: "e5a94e19c78146c7a02f57c67bc5e69d.m2xzXExQUYAtitiEK6NO6f6O"
      WEBUI_SECRET_KEY: "dOxZYTTZgXKMHkqLBIQVImayQXAVWdzGBPuFJKggzcgvgPJPXpWzqzKaUOIOGGIr"
      ENABLE_UPLOADS: "true"
      UPLOAD_DIR: /app/backend/data/uploads
    ports:
      - "8889:8080"
    restart: on-failure
    depends_on:
      ollama:
        condition: service_healthy
    networks:
      - ai-network

  # -------------------------------------------------------------------
  # N8N Database (PostgreSQL)
  # -------------------------------------------------------------------
  db:
    image: postgres:17
    container_name: n8n-DB
    hostname: n8n-db
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: ["CMD", "pg_isready", "-q", "-d", "n8n", "-U", "n8nuser"]
      timeout: 45s
      interval: 10s
      retries: 10
    volumes:
      - /volume2/docker/n8n/db:/var/lib/postgresql/data:rw
    environment:
      TZ: Asia/Ho_Chi_Minh
      POSTGRES_DB: n8n
      POSTGRES_USER: n8nuser
      # NOTE(review): plaintext DB password committed to VCS — prefer an .env
      # file or docker secret.
      POSTGRES_PASSWORD: n8npass
    restart: on-failure:5
    networks:
      - ai-network

  # -------------------------------------------------------------------
  # N8N Workflow Automation
  # -------------------------------------------------------------------
  n8n:
    image: n8nio/n8n:next
    container_name: n8n
    hostname: n8n
    # Quoted: an unquoted 0:0 is a YAML 1.1 sexagesimal integer on some parsers.
    user: "0:0"
    security_opt:
      - no-new-privileges:true
    ports:
      - "5678:5678"
    volumes:
      - /volume2/docker/n8n/data:/root/.n8n:rw
      - /volume2/docker/n8n/files:/files:rw
    environment:
      N8N_HOST: n8n.khoavo.myds.me
      WEBHOOK_URL: https://n8n.khoavo.myds.me
      GENERIC_TIMEZONE: Asia/Ho_Chi_Minh
      TZ: Asia/Ho_Chi_Minh
      N8N_PORT: "5678"
      N8N_PROTOCOL: https
      NODE_ENV: production
      # NOTE(review): encryption key committed to VCS — move to an .env file;
      # changing it later invalidates stored credentials, so migrate carefully.
      N8N_ENCRYPTION_KEY: xzwqZIEhDyVFOyMghGIIeVEACxtShfbhwzorVYhiUkpSVGKcSgVjbBHxgmxKiejo
      N8N_SECURE_COOKIE: "false"
      DB_TYPE: postgresdb
      DB_POSTGRESDB_DATABASE: n8n
      DB_POSTGRESDB_HOST: n8n-db
      DB_POSTGRESDB_PORT: "5432"
      DB_POSTGRESDB_USER: n8nuser
      DB_POSTGRESDB_PASSWORD: n8npass
      N8N_AI_CORE_CONNECTION_TIMEOUT: "300000"
    depends_on:
      db:
        condition: service_healthy
    networks:
      - ai-network
      - syno-global-net

networks:
  ai-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.32.0.0/24
          gateway: 172.32.0.1
  syno-global-net:
    external: true

View file

@@ -91,7 +91,7 @@ export class VisionService {
   // Task: Detailed Captioning is best for understanding diagrams
   const text = '<MORE_DETAILED_CAPTION>';
-  const inputs = await this.processor(text, image);
+  const inputs = await this.processor(image, text);
   const generatedIds = await this.model.generate({
     ...inputs,