mirror of
https://github.com/labring/FastGPT.git
synced 2026-05-08 01:08:43 +08:00
7506a147e6
* batch node (#6732) * batch node * docs: add local code quality standards and style guides for automated review * refactor: remove enforced minimum for parallel concurrency, simplify edge handling in task runtime context, and fix loop output mapping * feat: auto-infer and sync valueType for parallel loop input and output based on referenced array source * fix: refactor parallelRun output type synchronization and improve sub-workflow error handling in dispatch service * feat: enforce parallel concurrency limits and validate against workflow loop constraints * feat: implement retry mechanism for parallel workflow tasks with usage tracking per attempt * fix review * perf: use function * refactor: abstract nested node logic into useNestedNode hook and update parallelRun icon/service logic * fix: type import * refactor: update ParallelRunStatusEnum and i18n labels for improved status clarity * feat: parallel run details and input/output display to chat response modal and service dispatch * fix: config limit error * refactor: optimize parallel run task execution, fix point accumulation, and improve error handling for sub-workflows * fix: include totalPoints in parallel task results * refactor: centralize nested input injection and point safety utilities for workflow dispatchers * test: add unit tests for safePoints utility function * refactor: update parallel workflow runtime types and clean up docstring placement in dispatch utils * fix: include all runtime nodes in parallel execution to ensure variable reference accessibility * refactor: update pushSubWorkflowUsage signature to use object parameter for improved consistency --------- Co-authored-by: DigHuang <114602213+DigHuang@users.noreply.github.com> * feat(s3): add proxy transfer mode with tokenized upload/download (#6729) * feat(s3): add proxy transfer mode with tokenized upload/download * wip: switch to proxy mode for upload progress * fix: office mime types * fix(s3): upload MIME validation, multer whitelist, 
API error status - Treat AVI/MPEG mime aliases (incl. video/mp1s vs video/mpeg) as matching - Optional allowedExtensions on multer for dataset images and localFile - Map S3/business errors to 4xx in jsonRes where appropriate - Align presign max size with team plan; fix dataset import size UX - Add upload validation tests Made-with: Cursor * fix: show clear message when upload frequency limit is exceeded - Reject ERROR_ENUM.uploadFileIntervalLimit from authFrequencyLimit instead of Mongo doc - Add i18n for upload_file_interval_limit (zh-CN/en/zh-Hant) Made-with: Cursor * fix file token validation and upload mime checks * fix: test * fix(s3): treat m4a audio/mp4 and audio/x-m4a as equivalent - Add MIME equivalence group for AAC/M4A container mismatch (mime-types vs file-type) - Add upload validation test for minimal ftyp/M4A buffer - Test env: keep FILE_TOKEN_KEY in vitest test.env and test/setup.ts (drop loadTestEnv file) Made-with: Cursor * fix(chat): 调试区文件类型与编辑态一致,并修复 accept 在 WebKit 下不更新 - ChatTest: 用 getAppChatConfig + getGuideModule 合并画布引导节点与 chatConfig - useChatTest: 依赖 fileSelectConfig 序列化与 chatConfig,避免深层变更未触发预览更新 - useSelectFile: 用 useCallback + input key 替代 useMemoizedFn,确保 accept 变更后重建 input Made-with: Cursor * fix: invalid request * feat: prompt inject (#6757) * feat: resume chat stream (#6722) * fix: openapi schema issue while creating openapi json * feat: resume chat stream * wip: chat status and read status * feat: sync chat side bar status * fix: allow reassignment of variables in chatTest handler Made-with: Cursor * feat(chat): stream resume hardening, resume modules in @fastgpt/service, stale generating cron - Move stream resume mirror + resumeStatus into packages/service; update API imports - chatTest: ensurePendingChatRoundItems, default responseChatItemId; zod default import for client - useChatTest + HomeChatWindow: enableAutoResume and sync init chatGenerateStatus - ChatContext: safe no-op defaults without provider - Cron: clean MongoChat 
stuck in generating >30min; timer lock cleanStaleGeneratingChat Made-with: Cursor * fix(chat): address stream-resume PR review (zod/mongoose enum, legacy status, upsert, UI race) - Zod: use z.nativeEnum(ChatGenerateStatusEnum); mongoose chatGenerateStatus enum as [0,1,2] only - Init APIs: default missing chatGenerateStatus to done before read/unread logic - ensurePendingChatRoundItems: unique index + upsert; rename ChatGenerateStatusEnum - ChatBox auto-resume: guard by chatId; sidebar sync via targetChatId - Tests: chat history/feedback APIs pass with schema fixes Made-with: Cursor * fix(chat): expose resume at /api/v2/chat/resume; openapi + review tidy - Move handler from v1/stream to v2/chat/resume (pairs with v2 completions + Redis mirror) - Update fetch, OpenAPI AIPath, comments; remove slim projects/app global chat api - getHistoryStatus default chatGenerateStatus; team init + chatTest notes; ChatItem tweak Made-with: Cursor * fix(chat): fix resume JSON parse catch shadowing; drop unused resumeChatStream Made-with: Cursor * docs(chat): comment closed+stream mirror write path in workflow dispatch Made-with: Cursor * refactor: unify resumable stream mirroring * fix: keep v1 chat completions out of resume flow * refactor: make prepared chat rounds transactional * fix: handle resume stream terminal errors * fix: rerank max token * feat(workflow): extend variable update node with Number/Boolean/Array operations (#6752) * feat(workflow): extend variable update node with Number/Boolean/Array ops * feat: math operator icons and refactor variable update renderers for improved layout and consistency * chore(workflow): clean up variable update types and restore icon cleanup * feat: add test * fix:md_ascii_bug (#6755) * md_ascii_bug * md_ascii_bug * md_ascii_bug * md_ascii_bug * md_ascii_bug * perf: test --------- Co-authored-by: archer <545436317@qq.com> * doc * del dataset * perf: date auto coerce * doc * add test * perf: channel setting * doc * fix: chat resume stream 
(#6759) * refactor(api): move stream resume to /api/core/chat/resume Relocate resume handler from pages/api/v2 to pages/api/core, update OpenAPI paths, frontend streamResumeFetch URL, tests, and comments. Made-with: Cursor * fix: remove stray conflict markers; use z.nativeEnum for chatGenerateStatus Made-with: Cursor * fix: use enum instead of nativeEnum * fix(chat): address resume review suggestions * fix(chat): require sse when resuming generating chats * revert(chat): keep chatitem dataId index non-unique * fix: ts * fix doc * fix(chat): gate stream resume mirror by header (#6760) * fix: remove stray conflict markers; use z.nativeEnum for chatGenerateStatus Made-with: Cursor * fix: use enum instead of nativeEnum * fix(chat): address resume review suggestions * fix(chat): require sse when resuming generating chats * feat(chat): gate stream resume mirror by header * refactor(chat): decouple resume mirror header parsing * perf: dataset queue * fix: multipleselect * perf: workflow bug * doc * doc * perf: deploy yml;fix: child nodes watch * adapt embedding model defaultconfig * install shell * add mcp zod check * feat: http tool zod schema * Feat/batch UI (#6763) * feat: aggregate parallel run results into task-specific virtual nodes and update UI to support i18n arguments for module names * style: update workflow node card padding and table styling for improved layout consistency * feat: implement parallel run workflow node with documentation and i18n support * style(modal): WholeResponseModal UI and layout styling * chore: improve chat resume UX (#6764) * fix: remove stray conflict markers; use z.nativeEnum for chatGenerateStatus Made-with: Cursor * fix: use enum instead of nativeEnum * fix(chat): address resume review suggestions * fix(chat): require sse when resuming generating chats * feat(chat): gate stream resume mirror by header * refactor(chat): decouple resume mirror header parsing * feat: improve stream resume fallback * feat: block duplicate chat 
generation * feat: polish resume unavailable recovery * test: stabilize resume stream timeout * fix: harden resume wait flow * fix: get mcp tool raw schema * style: update UI styling and layout for LLM request detail and response modals * perf: http tool * fix: test * fix: http raw schema * fix: test * deploy yml * deploy yml --------- Co-authored-by: DigHuang <114602213+DigHuang@users.noreply.github.com> Co-authored-by: Ryo <whoeverimf5@gmail.com> Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com>
410 lines
13 KiB
YAML
410 lines
13 KiB
YAML
# 用于开发的 docker-compose 文件:
# - 只包含 FastGPT 的最小化运行条件
# - 没有 FastGPT 本体
# - 所有端口都映射到外层
#   - fastgpt-pg: 5432
#   - fastgpt-mongo: 27017
#   - fastgpt-redis: 6379
#   - fastgpt-code-sandbox: 3002
#   - fastgpt-mcp-server: 3003
#   - fastgpt-plugin: 3004
#   - fastgpt-volume-manager: 3005
#   - opensandbox-server: 8090
#   - fastgpt-aiproxy: 3010
#   - fastgpt-aiproxy-pg: 5432
# - 使用 pgvector 作为默认的向量库
# - 配置 opensandbox-config 的 network_mode 为 docker 网络,如 dev_fastgpt
# - 配置 opensandbox-config 的 host_ip 为宿主机 LAN IP,如 192.168.1.100

# plugin auth token
x-plugin-auth-token: &x-plugin-auth-token 'token'
# code sandbox token
x-code-sandbox-token: &x-code-sandbox-token 'codesandbox'
# volume manager auth token
x-volume-manager-auth-token: &x-volume-manager-auth-token 'vmtoken'
# aiproxy token
x-aiproxy-token: &x-aiproxy-token 'token'

# Shared database/storage connection settings, merged into service `environment` maps via `<<:`.
x-share-db-config: &x-share-db-config
  MONGODB_URI: mongodb://myusername:mypassword@fastgpt-mongo:27017/fastgpt?authSource=admin
  DB_MAX_LINK: 100
  REDIS_URL: redis://default:mypassword@fastgpt-redis:6379
  # @see https://doc.fastgpt.cn/docs/self-host/config/object-storage
  STORAGE_VENDOR: minio # minio | aws-s3 | cos | oss
  STORAGE_REGION: us-east-1
  STORAGE_ACCESS_KEY_ID: minioadmin
  STORAGE_SECRET_ACCESS_KEY: minioadmin
  STORAGE_PUBLIC_BUCKET: fastgpt-public
  STORAGE_PRIVATE_BUCKET: fastgpt-private
  # An endpoint reachable by both the server and clients: a fixed host LAN IP or a domain
  # name. Do NOT use 127.0.0.1/localhost or other loopback addresses (unreachable from
  # inside containers).
  STORAGE_EXTERNAL_ENDPOINT: http://192.168.0.2:9000
  STORAGE_S3_ENDPOINT: http://fastgpt-minio:9000 # scheme://host(IP):port
  # Boolean-looking environment values are quoted per the Compose spec so YAML
  # does not type them as booleans.
  STORAGE_S3_FORCE_PATH_STYLE: 'true'
  STORAGE_S3_MAX_RETRIES: 3

# Logging settings, merged into service `environment` maps via `<<:`.
x-log-config: &x-log-config
  LOG_ENABLE_CONSOLE: 'true'
  LOG_CONSOLE_LEVEL: debug
  LOG_ENABLE_OTEL: 'false'
  LOG_OTEL_LEVEL: info
  LOG_OTEL_URL: http://localhost:4318/v1/logs

services:
  # Vector DB (pgvector is the default vector store for this dev setup)
  fastgpt-pg:
    image: pgvector/pgvector:0.8.0-pg15
    container_name: fastgpt-pg
    restart: always
    ports: # avoid exposing ports like this in production
      # Port mappings are quoted so YAML never parses HOST:CONTAINER as a base-60 number.
      - '5432:5432'
    networks:
      - fastgpt
    environment:
      # These settings only take effect on first run. Changing them and restarting the
      # image has no effect; delete the persisted volume and restart to apply changes.
      - POSTGRES_USER=username
      - POSTGRES_PASSWORD=password
      - POSTGRES_DB=postgres
    volumes:
      - fastgpt-pg:/var/lib/postgresql/data
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', 'username', '-d', 'postgres']
      interval: 5s
      timeout: 5s
      retries: 10

# DB
|
|
fastgpt-mongo:
|
|
image: mongo:5.0.32 # cpu 不支持 AVX 时候使用 4.4.29
|
|
container_name: fastgpt-mongo
|
|
restart: always
|
|
ports:
|
|
- 27017:27017
|
|
networks:
|
|
- fastgpt
|
|
command: mongod --keyFile /data/mongodb.key --replSet rs0
|
|
environment:
|
|
- MONGO_INITDB_ROOT_USERNAME=myusername
|
|
- MONGO_INITDB_ROOT_PASSWORD=mypassword
|
|
volumes:
|
|
- fastgpt-mongo:/data/db
|
|
healthcheck:
|
|
test:
|
|
[
|
|
'CMD',
|
|
'mongo',
|
|
'-u',
|
|
'myusername',
|
|
'-p',
|
|
'mypassword',
|
|
'--authenticationDatabase',
|
|
'admin',
|
|
'--eval',
|
|
"db.adminCommand('ping')"
|
|
]
|
|
interval: 10s
|
|
timeout: 5s
|
|
retries: 5
|
|
start_period: 30s
|
|
entrypoint:
|
|
- bash
|
|
- -c
|
|
- |
|
|
openssl rand -base64 128 > /data/mongodb.key
|
|
chmod 400 /data/mongodb.key
|
|
chown 999:999 /data/mongodb.key
|
|
echo 'const isInited = rs.status().ok === 1
|
|
if(!isInited){
|
|
rs.initiate({
|
|
_id: "rs0",
|
|
members: [
|
|
{ _id: 0, host: "fastgpt-mongo:27017" }
|
|
]
|
|
})
|
|
}' > /data/initReplicaSet.js
|
|
# 启动MongoDB服务
|
|
exec docker-entrypoint.sh "$$@" &
|
|
|
|
# 等待MongoDB服务启动
|
|
until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')"; do
|
|
echo "Waiting for MongoDB to start..."
|
|
sleep 2
|
|
done
|
|
|
|
# 执行初始化副本集的脚本
|
|
mongo -u myusername -p mypassword --authenticationDatabase admin /data/initReplicaSet.js
|
|
|
|
# 等待docker-entrypoint.sh脚本执行的MongoDB服务进程
|
|
wait $$!
|
|
fastgpt-redis:
|
|
image: redis:7.2-alpine
|
|
container_name: fastgpt-redis
|
|
ports:
|
|
- 6379:6379
|
|
networks:
|
|
- fastgpt
|
|
restart: always
|
|
command: |
|
|
redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
|
|
healthcheck:
|
|
test: ['CMD', 'redis-cli', '-a', 'mypassword', 'ping']
|
|
interval: 10s
|
|
timeout: 3s
|
|
retries: 3
|
|
start_period: 30s
|
|
volumes:
|
|
- fastgpt-redis:/data
|
|
fastgpt-minio:
|
|
image: minio/minio:RELEASE.2025-09-07T16-13-09Z # cpu 不支持 AVX 时候使用 RELEASE.2025-09-07T16-13-09Z-cpuv1
|
|
container_name: fastgpt-minio
|
|
restart: always
|
|
networks:
|
|
- fastgpt
|
|
ports:
|
|
- '9000:9000'
|
|
- '9001:9001'
|
|
environment:
|
|
- MINIO_ROOT_USER=minioadmin
|
|
- MINIO_ROOT_PASSWORD=minioadmin
|
|
volumes:
|
|
- fastgpt-minio:/data
|
|
command: server /data --console-address ":9001"
|
|
healthcheck:
|
|
test: ['CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live']
|
|
interval: 30s
|
|
timeout: 20s
|
|
retries: 3
|
|
fastgpt-code-sandbox:
|
|
container_name: fastgpt-code-sandbox
|
|
image: ghcr.io/labring/fastgpt-code-sandbox:v4.14.10
|
|
ports:
|
|
- 3002:3000
|
|
networks:
|
|
- fastgpt
|
|
restart: always
|
|
environment:
|
|
<<: [*x-log-config]
|
|
LOG_OTEL_SERVICE_NAME: fastgpt-code-sandbox
|
|
SANDBOX_TOKEN: *x-code-sandbox-token
|
|
# ===== Resource Limits =====
|
|
# Execution timeout per request (ms)
|
|
SANDBOX_MAX_TIMEOUT: 60000
|
|
# Maximum allowed memory per user code execution (MB)
|
|
# Note: System automatically adds 50MB for runtime overhead
|
|
# Actual process limit = SANDBOX_MAX_MEMORY_MB + 50MB
|
|
SANDBOX_MAX_MEMORY_MB: 256
|
|
|
|
# ===== Process Pool =====
|
|
# Number of pre-warmed worker processes (JS + Python)
|
|
SANDBOX_POOL_SIZE: 20
|
|
|
|
# ===== Network Request Limits =====
|
|
# Whether to check if the request is to a private network
|
|
CHECK_INTERNAL_IP: false
|
|
# Maximum number of HTTP requests per execution
|
|
SANDBOX_REQUEST_MAX_COUNT: 30
|
|
# Timeout for each outbound HTTP request (ms)
|
|
SANDBOX_REQUEST_TIMEOUT: 60000
|
|
# Maximum response body size for outbound requests
|
|
SANDBOX_REQUEST_MAX_RESPONSE_MB: 10
|
|
# Maximum request body size for outbound requests (MB)
|
|
SANDBOX_REQUEST_MAX_BODY_MB: 5
|
|
|
|
# ===== Module Control =====
|
|
# JS allowed modules whitelist (comma-separated)
|
|
SANDBOX_JS_ALLOWED_MODULES: lodash,dayjs,moment,uuid,crypto-js,qs,url,querystring
|
|
# Python allowed modules whitelist (comma-separated)
|
|
SANDBOX_PYTHON_ALLOWED_MODULES: math,cmath,decimal,fractions,random,statistics,collections,array,heapq,bisect,queue,copy,itertools,functools,operator,string,re,difflib,textwrap,unicodedata,codecs,datetime,time,calendar,_strptime,json,csv,base64,binascii,struct,hashlib,hmac,secrets,uuid,typing,abc,enum,dataclasses,contextlib,pprint,weakref,numpy,pandas,matplotlib
|
|
healthcheck:
|
|
test: ['CMD', 'curl', '-f', 'http://localhost:3000/health']
|
|
interval: 30s
|
|
timeout: 20s
|
|
retries: 3
|
|
fastgpt-mcp-server:
|
|
container_name: fastgpt-mcp-server
|
|
image: ghcr.io/labring/fastgpt-mcp_server:v4.14.10
|
|
ports:
|
|
- 3003:3000
|
|
networks:
|
|
- fastgpt
|
|
restart: always
|
|
environment:
|
|
- FASTGPT_ENDPOINT=http://fastgpt:3000
|
|
fastgpt-plugin:
|
|
image: ghcr.io/labring/fastgpt-plugin:v0.5.6
|
|
container_name: fastgpt-plugin
|
|
restart: always
|
|
ports:
|
|
- 3004:3000
|
|
networks:
|
|
- fastgpt
|
|
environment:
|
|
<<: [*x-share-db-config, *x-log-config]
|
|
AUTH_TOKEN: *x-plugin-auth-token
|
|
# 工具网络请求,最大请求和响应体
|
|
SERVICE_REQUEST_MAX_CONTENT_LENGTH: 10
|
|
# 最大 API 请求体大小
|
|
MAX_API_SIZE: 10
|
|
# 传递给 OTLP 收集器的服务名称
|
|
LOG_OTEL_SERVICE_NAME: fastgpt-plugin
|
|
depends_on:
|
|
fastgpt-minio:
|
|
condition: service_healthy
|
|
healthcheck:
|
|
test: ['CMD', 'curl', '-f', 'http://localhost:3000/health']
|
|
interval: 30s
|
|
timeout: 20s
|
|
retries: 3
|
|
|
|
# 沙盒控制器:管理 Docker 容器的创建/执行/停止/删除
|
|
# runtime=docker 模式需要挂载 Docker socket
|
|
# 配置 docker.host_ip 为宿主机 LAN IP(容器内访问宿主机服务用)
|
|
opensandbox-server:
|
|
image: opensandbox/server:v0.1.9
|
|
container_name: fastgpt-opensandbox-server
|
|
restart: always
|
|
ports:
|
|
- 8090:8090
|
|
networks:
|
|
- fastgpt
|
|
extra_hosts:
|
|
- 'host.docker.internal:host-gateway'
|
|
volumes:
|
|
- /var/run/docker.sock:/var/run/docker.sock # Docker 模式必须挂载
|
|
configs:
|
|
- source: opensandbox-config
|
|
target: /etc/opensandbox/config.toml
|
|
environment:
|
|
SANDBOX_CONFIG_PATH: /etc/opensandbox/config.toml
|
|
healthcheck:
|
|
test: ['CMD', 'curl', '-f', 'http://localhost:8090/health']
|
|
interval: 10s
|
|
timeout: 5s
|
|
retries: 5
|
|
# Pre-pull only: not started by `docker compose up` (uses profile `prepull`).
|
|
opensandbox-agent-sandbox-image:
|
|
image: ghcr.io/labring/fastgpt-agent-sandbox:v0.1
|
|
profiles:
|
|
- prepull
|
|
opensandbox-execd-image:
|
|
image: opensandbox/execd:v1.0.7
|
|
profiles:
|
|
- prepull
|
|
opensandbox-egress-image:
|
|
image: opensandbox/egress:v1.0.3
|
|
profiles:
|
|
- prepull
|
|
# 卷管理微服务:负责幂等创建/删除 Docker named volume 或 k8s PVC
|
|
fastgpt-volume-manager:
|
|
image: ghcr.io/labring/fastgpt-agent-volume-manager:v0.1
|
|
container_name: fastgpt-volume-manager
|
|
restart: always
|
|
ports:
|
|
- 3005:3000
|
|
networks:
|
|
- fastgpt
|
|
volumes:
|
|
- /var/run/docker.sock:/var/run/docker.sock:ro # Docker 模式必须挂载(只读即可)
|
|
environment:
|
|
PORT: 3000
|
|
VM_RUNTIME: docker
|
|
VM_AUTH_TOKEN: *x-volume-manager-auth-token # 对应 AGENT_SANDBOX_VOLUME_MANAGER_TOKEN
|
|
VM_VOLUME_NAME_PREFIX: fastgpt-session # volume 名称前缀
|
|
VM_LOG_LEVEL: info
|
|
healthcheck:
|
|
test:
|
|
[
|
|
'CMD',
|
|
'bun',
|
|
'-e',
|
|
"fetch('http://localhost:3000/health').then((res) => { if (!res.ok) throw new Error(String(res.status)); })"
|
|
]
|
|
interval: 10s
|
|
timeout: 5s
|
|
retries: 5
|
|
|
|
# AI Proxy
|
|
fastgpt-aiproxy:
|
|
image: ghcr.io/labring/aiproxy:v0.3.5
|
|
container_name: fastgpt-aiproxy
|
|
restart: unless-stopped
|
|
ports:
|
|
- 3010:3000
|
|
depends_on:
|
|
fastgpt-aiproxy-pg:
|
|
condition: service_healthy
|
|
networks:
|
|
- fastgpt
|
|
- aiproxy
|
|
environment:
|
|
# 对应 fastgpt 里的AIPROXY_API_TOKEN
|
|
ADMIN_KEY: *x-aiproxy-token
|
|
# 错误日志详情保存时间(小时)
|
|
LOG_DETAIL_STORAGE_HOURS: 1
|
|
# 数据库连接地址
|
|
SQL_DSN: postgres://postgres:aiproxy@fastgpt-aiproxy-pg:5432/aiproxy
|
|
# 最大重试次数
|
|
RETRY_TIMES: 3
|
|
# 不需要计费
|
|
BILLING_ENABLED: false
|
|
# 不需要严格检测模型
|
|
DISABLE_MODEL_CONFIG: true
|
|
healthcheck:
|
|
test: ['CMD', 'curl', '-f', 'http://localhost:3000/api/status']
|
|
interval: 5s
|
|
timeout: 5s
|
|
retries: 10
|
|
fastgpt-aiproxy-pg:
|
|
image: pgvector/pgvector:0.8.0-pg15 # docker hub
|
|
restart: unless-stopped
|
|
container_name: fastgpt-aiproxy-pg
|
|
volumes:
|
|
- fastgpt-aiproxy_pg:/var/lib/postgresql/data
|
|
networks:
|
|
- aiproxy
|
|
environment:
|
|
TZ: Asia/Shanghai
|
|
POSTGRES_USER: postgres
|
|
POSTGRES_DB: aiproxy
|
|
POSTGRES_PASSWORD: aiproxy
|
|
healthcheck:
|
|
test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
|
|
interval: 5s
|
|
timeout: 5s
|
|
retries: 10
|
|
networks:
  fastgpt:
  aiproxy:

volumes:
  fastgpt-pg:
  fastgpt-mongo:
  fastgpt-redis:
  fastgpt-minio:
  fastgpt-aiproxy_pg:

configs:
  # Inline TOML configuration consumed by opensandbox-server.
  opensandbox-config:
    content: |
      [server]
      host = "0.0.0.0"
      port = 8090
      log_level = "INFO"

      [runtime]
      type = "docker"
      execd_image = "opensandbox/execd:v1.0.7"

      [egress]
      image = "opensandbox/egress:v1.0.3"

      [docker]
      network_mode = "bridge"
      # When server runs in a container, set host_ip to the host's IP or hostname so bridge-mode endpoints are reachable (e.g. host.docker.internal or the host LAN IP).
      # It's required when server deployed with docker container under host.
      host_ip = "host.docker.internal"
      drop_capabilities = ["AUDIT_WRITE", "MKNOD", "NET_ADMIN", "NET_RAW", "SYS_ADMIN", "SYS_MODULE", "SYS_PTRACE", "SYS_TIME", "SYS_TTY_CONFIG"]
      no_new_privileges = true
      pids_limit = 512

      [ingress]
      mode = "direct"