mirror of
https://github.com/onyx-dot-app/onyx.git
synced 2026-04-12 02:12:42 +00:00
Compare commits
3 Commits
edge
...
jamison/on
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
88b42d98b8 | ||
|
|
1e41ad7dbb | ||
|
|
b002dd3d82 |
@@ -171,6 +171,13 @@ repos:
|
||||
pass_filenames: false
|
||||
files: ^web/package(-lock)?\.json$
|
||||
|
||||
- id: compose-variants-check
|
||||
name: Check docker-compose variants are up to date
|
||||
entry: uv run deployment/docker_compose/generate_compose_variants.py --check
|
||||
language: system
|
||||
pass_filenames: false
|
||||
files: ^deployment/docker_compose/(docker-compose\.yml|generate_compose_variants\.py|headless/docker-compose\.yml)$
|
||||
|
||||
# Uses tsgo (TypeScript's native Go compiler) for ~10x faster type checking.
|
||||
# This is a preview package - if it breaks:
|
||||
# 1. Try updating: cd web && npm update @typescript/native-preview
|
||||
|
||||
482
deployment/docker_compose/generate_compose_variants.py
Normal file
482
deployment/docker_compose/generate_compose_variants.py
Normal file
@@ -0,0 +1,482 @@
|
||||
# /// script
|
||||
# requires-python = ">=3.11"
|
||||
# dependencies = ["ruamel.yaml>=0.18"]
|
||||
# ///
|
||||
"""Generate docker-compose variant files from the main docker-compose.yml.
|
||||
|
||||
Each variant defines which services to remove, which to add, which
|
||||
commented-out blocks to strip, and where to write the output. The main
|
||||
docker-compose.yml is the single source of truth — variants are derived
|
||||
from it so shared service definitions never drift.
|
||||
|
||||
Usage:
|
||||
uv run generate_compose_variants.py # generate all variants
|
||||
uv run generate_compose_variants.py headless # generate one variant
|
||||
uv run generate_compose_variants.py --check # check all (CI / pre-commit)
|
||||
uv run generate_compose_variants.py --check headless # check one
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import difflib
|
||||
import re
|
||||
import sys
|
||||
from dataclasses import dataclass
|
||||
from dataclasses import field
|
||||
from io import StringIO
|
||||
from pathlib import Path
|
||||
|
||||
from ruamel.yaml import YAML
|
||||
from ruamel.yaml.comments import CommentedMap
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Variant configuration
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
class Variant:
    """Recipe for deriving one compose variant from the main docker-compose.yml."""

    # Variant name; doubles as the CLI argument that selects it.
    name: str
    # Output subdirectory, relative to SCRIPT_DIR.
    output_dir: str
    # Comment banner prepended to the generated file.
    header: str
    # Services to delete from the base compose file.
    remove_services: set[str] = field(default_factory=set)
    # Extra services to splice in. Each entry is a self-contained YAML
    # document defining a single service (e.g. "cli_server:\n  image: ...").
    add_services_yaml: list[str] = field(default_factory=list)
    # Names to append to the top-level volumes section.
    add_volumes: list[str] = field(default_factory=list)
    # Commented-out service blocks to strip from the output (regex-matched).
    strip_commented_blocks: list[str] = field(default_factory=list)
    # Environment overrides for existing services, as
    # service_name -> {env_var: value}; entries are added to (or replace
    # matching entries in) the service's `environment` list.
    env_overrides: dict[str, dict[str, str]] = field(default_factory=dict)
    # Names to delete from the top-level volumes section.
    remove_volumes: set[str] = field(default_factory=set)
||||
|
||||
|
||||
# Global registry of variants, keyed by variant name.
VARIANTS: dict[str, Variant] = {}


def register(v: Variant) -> Variant:
    """Record *v* in the registry; returns it so call sites stay terse."""
    VARIANTS[v.name] = v
    return v
|
||||
|
||||
|
||||
# Shared cli_server service definition. Both headless variants add the exact
# same SSH-accessible CLI front-end; keeping the YAML in one constant prevents
# the two copies from drifting apart (the same drift this whole script exists
# to avoid for the base file).
CLI_SERVER_SERVICE_YAML = """\
cli_server:
  image: ${ONYX_CLI_IMAGE:-onyxdotapp/onyx-cli:${IMAGE_TAG:-latest}}
  command: ["serve", "--host", "0.0.0.0", "--port", "2222"]
  depends_on:
    - api_server
  restart: unless-stopped
  ports:
    - "${ONYX_SSH_PORT:-2222}:2222"
  environment:
    - ONYX_SERVER_URL=http://api_server:8080
  volumes:
    - cli_config:/home/onyx/.config
  logging:
    driver: json-file
    options:
      max-size: "50m"
      max-file: "6"
"""


# -- headless ---------------------------------------------------------------

register(
    Variant(
        name="headless",
        output_dir="headless",
        header="""\
# =============================================================================
# ONYX HEADLESS DOCKER COMPOSE (AUTO-GENERATED)
# =============================================================================
# This file is generated by generate_compose_variants.py from docker-compose.yml.
# DO NOT EDIT THIS FILE DIRECTLY — your changes will be overwritten.
#
# To regenerate:
# cd deployment/docker_compose
# uv run generate_compose_variants.py headless
#
# Usage:
# cd deployment/docker_compose/headless
# docker compose up -d
#
# Connect via SSH:
# ssh localhost -p 2222
# =============================================================================

""",
        remove_services={"web_server", "nginx"},
        add_services_yaml=[CLI_SERVER_SERVICE_YAML],
        add_volumes=["cli_config"],
        strip_commented_blocks=["mcp_server", "certbot"],
    )
)


# -- headless-lite ----------------------------------------------------------

register(
    Variant(
        name="headless-lite",
        output_dir="headless-lite",
        header="""\
# =============================================================================
# ONYX HEADLESS-LITE DOCKER COMPOSE (AUTO-GENERATED)
# =============================================================================
# This file is generated by generate_compose_variants.py from docker-compose.yml.
# DO NOT EDIT THIS FILE DIRECTLY — your changes will be overwritten.
#
# To regenerate:
# cd deployment/docker_compose
# uv run generate_compose_variants.py headless-lite
#
# Usage:
# cd deployment/docker_compose/headless-lite
# docker compose up -d
#
# Connect via SSH:
# ssh localhost -p 2222
#
# This is a minimal headless deployment: no web UI, no Vespa, no Redis,
# no model servers, no background workers, no OpenSearch, and no MinIO.
# Only PostgreSQL is required. Connectors and RAG search are disabled,
# but core chat (LLM conversations, tools, user file uploads, Projects,
# Agent knowledge, code interpreter) still works.
# =============================================================================

""",
        remove_services={
            "web_server",
            "nginx",
            "background",
            "cache",
            "index",
            "indexing_model_server",
            "inference_model_server",
            "opensearch",
            "minio",
        },
        add_services_yaml=[CLI_SERVER_SERVICE_YAML],
        add_volumes=["cli_config"],
        remove_volumes={
            "vespa_volume",
            "model_cache_huggingface",
            "indexing_huggingface_model_cache",
            "background_logs",
            "inference_model_server_logs",
            "indexing_model_server_logs",
            "opensearch-data",
            "minio_data",
        },
        strip_commented_blocks=["mcp_server", "certbot"],
        env_overrides={
            "api_server": {
                "DISABLE_VECTOR_DB": "true",
                "FILE_STORE_BACKEND": "postgres",
                "CACHE_BACKEND": "postgres",
                "AUTH_BACKEND": "postgres",
            },
        },
    )
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# YAML helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
SCRIPT_DIR = Path(__file__).resolve().parent
SOURCE = SCRIPT_DIR / "docker-compose.yml"


def load_yaml() -> tuple[YAML, CommentedMap]:
    """Load the base compose file round-trip-safely.

    Returns the configured ruamel loader alongside the parsed document so
    the same instance (with identical settings) can serialize it back.
    """
    loader = YAML()
    loader.preserve_quotes = True
    # Very wide limit so ruamel never re-wraps long lines on dump.
    loader.width = 4096
    with open(SOURCE) as handle:
        document = loader.load(handle)
    return loader, document
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Service transforms (for standalone output files)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def strip_build_blocks(services: CommentedMap) -> None:
    """Delete the ``build`` key from every service, in place.

    Variant files are standalone downloads: users run pre-built images, and
    the ``build`` contexts reference repo-relative paths that won't exist
    next to a downloaded compose file.
    """
    for service in services.values():
        if not isinstance(service, CommentedMap):
            continue
        if "build" in service:
            del service["build"]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Text post-processing
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def remove_commented_blocks(text: str, block_names: list[str]) -> str:
    """Strip commented-out service blocks named in *block_names* from *text*.

    A block starts at a line like ``# <name>:`` and runs through the
    following comment/blank lines. Introductory comments and blank lines
    immediately above the block are removed too, and any run of more than
    two consecutive blank lines elsewhere is collapsed.
    """
    if not block_names:
        return text

    alternatives = "|".join(re.escape(name) for name in block_names)
    header_re = re.compile(r"^\s*#\s*(" + alternatives + r"):")

    kept: list[str] = []
    in_block = False
    consecutive_blanks = 0

    for raw in text.split("\n"):
        if header_re.match(raw):
            in_block = True
            # Peel off the comments/blanks that introduced this block.
            while kept and (
                kept[-1].strip() == "" or kept[-1].lstrip().startswith("#")
            ):
                kept.pop()
            continue

        if in_block:
            body = raw.strip()
            if body == "" or body.startswith("#"):
                continue  # still inside the commented block
            in_block = False

        if raw.strip() == "":
            consecutive_blanks += 1
            if consecutive_blanks > 2:
                continue
        else:
            consecutive_blanks = 0

        kept.append(raw)

    return "\n".join(kept)
|
||||
|
||||
|
||||
def ensure_blank_line_before(text: str, pattern: str) -> str:
    """Insert a blank line before each line matching regex *pattern*."""
    separator = re.compile(r"(\n)(" + pattern + r")")
    return separator.sub(r"\1\n\2", text)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Generation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def generate(variant: Variant) -> str:
    """Produce the complete text of *variant*'s docker-compose.yml.

    Loads the base compose file, applies the variant's service removals,
    additions, and environment overrides, serializes it, then applies
    text-level clean-ups before prepending the variant header.
    """
    dumper, data = load_yaml()
    services = data["services"]

    # 1. Drop the services this variant excludes.
    for excluded in variant.remove_services:
        if excluded in services:
            del services[excluded]

    # 2. Prune depends_on references to services that no longer exist.
    surviving = set(services.keys())
    for svc_name in list(services):
        svc = services[svc_name]
        if not isinstance(svc, CommentedMap) or "depends_on" not in svc:
            continue
        deps = svc["depends_on"]
        if isinstance(deps, list):
            # Short list form: keep only surviving names.
            deps[:] = [dep for dep in deps if dep in surviving]
            if not deps:
                del svc["depends_on"]
        elif isinstance(deps, CommentedMap):
            # Long mapping form (with conditions): drop dead entries.
            for dep in list(deps):
                if dep not in surviving:
                    del deps[dep]
            if not deps:
                del svc["depends_on"]

    # 3. Splice in the variant's extra services, remembering their names so
    #    blank separator lines can be inserted before them after dumping.
    fresh_names: set[str] = set()
    for svc_yaml in variant.add_services_yaml:
        mini = YAML()
        mini.preserve_quotes = True
        for svc_name, svc_def in mini.load(svc_yaml).items():
            services[svc_name] = svc_def
            fresh_names.add(svc_name)

    # 4. Apply environment overrides to the services that remain.
    for svc_name, overrides in variant.env_overrides.items():
        svc = services.get(svc_name)
        if not isinstance(svc, CommentedMap):
            continue
        env = svc.get("environment")
        if env is None:
            env = []
            svc["environment"] = env
        if isinstance(env, list):
            for key, value in overrides.items():
                # Drop any existing assignment of this key, then append ours.
                env[:] = [
                    entry
                    for entry in env
                    if not (isinstance(entry, str) and entry.startswith(key + "="))
                ]
                env.append(f"{key}={value}")

    # 5. Generated files are standalone downloads — users pull images, so
    #    build contexts (repo-relative paths) must go.
    strip_build_blocks(services)

    # 6. Adjust the top-level volumes section.
    if "volumes" in data:
        volumes = data["volumes"]
        for vol in variant.add_volumes:
            volumes[vol] = None
        for vol in variant.remove_volumes:
            if vol in volumes:
                del volumes[vol]

    # 7. Serialize and post-process as text.
    sink = StringIO()
    dumper.dump(data, sink)
    body = sink.getvalue()

    body = remove_commented_blocks(body, variant.strip_commented_blocks)

    # Cut the base file's own header comment (everything before "name:").
    anchor = body.find("name:")
    if anchor > 0:
        body = body[anchor:]

    # Blank separator lines before each added service and before "volumes:".
    for svc_name in fresh_names:
        body = ensure_blank_line_before(body, re.escape(f" {svc_name}:"))
    body = ensure_blank_line_before(body, r"volumes:\n")

    return variant.header + body
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CLI
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def output_path(variant: Variant) -> Path:
    """Absolute path of the variant's generated docker-compose.yml."""
    target_dir = SCRIPT_DIR / variant.output_dir
    return target_dir / "docker-compose.yml"
|
||||
|
||||
|
||||
def run_generate(variant: Variant) -> None:
    """Generate the variant's compose file, creating its directory if needed."""
    content = generate(variant)
    destination = output_path(variant)
    destination.parent.mkdir(parents=True, exist_ok=True)
    destination.write_text(content)
    print(f"Generated {destination.relative_to(SCRIPT_DIR)}")
|
||||
|
||||
|
||||
def run_check(variant: Variant) -> bool:
    """Compare the on-disk file to fresh output; return True when up to date.

    Prints a unified diff (existing vs. regenerated) and an actionable error
    message on mismatch; used by CI / pre-commit via --check.
    """
    expected = generate(variant)
    target = output_path(variant)

    if not target.exists():
        print(
            f"ERROR: {target.relative_to(SCRIPT_DIR)} does not exist. "
            f"Run without --check to generate it."
        )
        return False

    actual = target.read_text()
    if actual == expected:
        print(f"OK: {target.relative_to(SCRIPT_DIR)} is up to date.")
        return True

    sys.stdout.writelines(
        difflib.unified_diff(
            actual.splitlines(keepends=True),
            expected.splitlines(keepends=True),
            fromfile=str(target.relative_to(SCRIPT_DIR)),
            tofile="(generated)",
        )
    )
    print(
        f"\nERROR: {target.relative_to(SCRIPT_DIR)} is out of date. "
        f"Run 'uv run generate_compose_variants.py {variant.name}' to update."
    )
    return False
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point: generate variant files, or verify them with --check."""
    parser = argparse.ArgumentParser(
        description="Generate docker-compose variant files from docker-compose.yml"
    )
    parser.add_argument(
        "variant",
        nargs="?",
        choices=list(VARIANTS.keys()),
        help="Variant to generate (default: all)",
    )
    parser.add_argument(
        "--check",
        action="store_true",
        help="Check that generated files match existing ones (for CI / pre-commit)",
    )
    args = parser.parse_args()

    targets = [VARIANTS[args.variant]] if args.variant else list(VARIANTS.values())

    if args.check:
        # Materialize the list so every variant is checked and reported even
        # after the first failure — a generator inside all() would
        # short-circuit and hide subsequent out-of-date files from CI output.
        results = [run_check(v) for v in targets]
        sys.exit(0 if all(results) else 1)
    else:
        for v in targets:
            run_generate(v)


if __name__ == "__main__":
    main()
|
||||
154
deployment/docker_compose/headless-lite/docker-compose.yml
Normal file
154
deployment/docker_compose/headless-lite/docker-compose.yml
Normal file
@@ -0,0 +1,154 @@
|
||||
# =============================================================================
|
||||
# ONYX HEADLESS-LITE DOCKER COMPOSE (AUTO-GENERATED)
|
||||
# =============================================================================
|
||||
# This file is generated by generate_compose_variants.py from docker-compose.yml.
|
||||
# DO NOT EDIT THIS FILE DIRECTLY — your changes will be overwritten.
|
||||
#
|
||||
# To regenerate:
|
||||
# cd deployment/docker_compose
|
||||
# uv run generate_compose_variants.py headless-lite
|
||||
#
|
||||
# Usage:
|
||||
# cd deployment/docker_compose/headless-lite
|
||||
# docker compose up -d
|
||||
#
|
||||
# Connect via SSH:
|
||||
# ssh localhost -p 2222
|
||||
#
|
||||
# This is a minimal headless deployment: no web UI, no Vespa, no Redis,
|
||||
# no model servers, no background workers, no OpenSearch, and no MinIO.
|
||||
# Only PostgreSQL is required. Connectors and RAG search are disabled,
|
||||
# but core chat (LLM conversations, tools, user file uploads, Projects,
|
||||
# Agent knowledge, code interpreter) still works.
|
||||
# =============================================================================
|
||||
|
||||
name: onyx
|
||||
|
||||
services:
|
||||
api_server:
|
||||
image: ${ONYX_BACKEND_IMAGE:-onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}}
|
||||
command: >
|
||||
/bin/sh -c "alembic upgrade head &&
|
||||
echo \"Starting Onyx Api Server\" &&
|
||||
uvicorn onyx.main:app --host 0.0.0.0 --port 8080"
|
||||
# Check env.template and copy to .env for env vars
|
||||
env_file:
|
||||
- path: .env
|
||||
required: false
|
||||
depends_on:
|
||||
relational_db:
|
||||
condition: service_started
|
||||
restart: unless-stopped
|
||||
# DEV: To expose ports, either:
|
||||
# 1. Use docker-compose.dev.yml: docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d --wait
|
||||
# 2. Uncomment the ports below
|
||||
# ports:
|
||||
# - "8080:8080"
|
||||
environment:
|
||||
# Auth Settings
|
||||
- AUTH_TYPE=${AUTH_TYPE:-basic}
|
||||
- POSTGRES_HOST=${POSTGRES_HOST:-relational_db}
|
||||
- VESPA_HOST=${VESPA_HOST:-index}
|
||||
- OPENSEARCH_HOST=${OPENSEARCH_HOST:-opensearch}
|
||||
- OPENSEARCH_ADMIN_PASSWORD=${OPENSEARCH_ADMIN_PASSWORD:-StrongPassword123!}
|
||||
- ENABLE_OPENSEARCH_INDEXING_FOR_ONYX=${OPENSEARCH_FOR_ONYX_ENABLED:-true}
|
||||
- REDIS_HOST=${REDIS_HOST:-cache}
|
||||
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
|
||||
- CODE_INTERPRETER_BASE_URL=${CODE_INTERPRETER_BASE_URL:-http://code-interpreter:8000}
|
||||
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
|
||||
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
|
||||
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
|
||||
- ENABLE_CRAFT=${ENABLE_CRAFT:-false}
|
||||
- OUTPUTS_TEMPLATE_PATH=${OUTPUTS_TEMPLATE_PATH:-/app/onyx/server/features/build/sandbox/kubernetes/docker/templates/outputs}
|
||||
- VENV_TEMPLATE_PATH=${VENV_TEMPLATE_PATH:-/app/onyx/server/features/build/sandbox/kubernetes/docker/templates/venv}
|
||||
- WEB_TEMPLATE_PATH=${WEB_TEMPLATE_PATH:-/app/onyx/server/features/build/sandbox/kubernetes/docker/templates/outputs/web}
|
||||
- PERSISTENT_DOCUMENT_STORAGE_PATH=${PERSISTENT_DOCUMENT_STORAGE_PATH:-/app/file-system}
|
||||
- DISABLE_VECTOR_DB=true
|
||||
- FILE_STORE_BACKEND=postgres
|
||||
- CACHE_BACKEND=postgres
|
||||
- AUTH_BACKEND=postgres
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
logging:
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: "50m"
|
||||
max-file: "6"
|
||||
healthcheck:
|
||||
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8080/health')"]
|
||||
interval: 30s
|
||||
timeout: 20s
|
||||
retries: 3
|
||||
start_period: 25s
|
||||
# Optional, only for debugging purposes
|
||||
volumes:
|
||||
- api_server_logs:/var/log/onyx
|
||||
# Shared volume for persistent document storage (Craft file-system mode)
|
||||
- file-system:/app/file-system
|
||||
|
||||
relational_db:
|
||||
image: postgres:15.2-alpine
|
||||
shm_size: 1g
|
||||
command: -c 'max_connections=250'
|
||||
env_file:
|
||||
- path: .env
|
||||
required: false
|
||||
restart: unless-stopped
|
||||
# PRODUCTION: Override the defaults by passing in the environment variables
|
||||
environment:
|
||||
- POSTGRES_USER=${POSTGRES_USER:-postgres}
|
||||
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-password}
|
||||
# DEV: To expose ports, either:
|
||||
# 1. Use docker-compose.dev.yml: docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d --wait
|
||||
# 2. Uncomment the ports below
|
||||
# ports:
|
||||
# - "5432:5432"
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres}"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
volumes:
|
||||
- db_volume:/var/lib/postgresql/data
|
||||
|
||||
# This container name cannot have an underscore in it due to Vespa expectations of the URL
|
||||
code-interpreter:
|
||||
image: onyxdotapp/code-interpreter:${CODE_INTERPRETER_IMAGE_TAG:-latest}
|
||||
command: ["bash", "./entrypoint.sh", "code-interpreter-api"]
|
||||
restart: unless-stopped
|
||||
env_file:
|
||||
- path: .env
|
||||
required: false
|
||||
|
||||
# Below is needed for the `docker-out-of-docker` execution mode
|
||||
# For Linux rootless Docker, set DOCKER_SOCK_PATH=${XDG_RUNTIME_DIR}/docker.sock
|
||||
user: root
|
||||
volumes:
|
||||
- ${DOCKER_SOCK_PATH:-/var/run/docker.sock}:/var/run/docker.sock
|
||||
|
||||
cli_server:
|
||||
image: ${ONYX_CLI_IMAGE:-onyxdotapp/onyx-cli:${IMAGE_TAG:-latest}}
|
||||
command: ["serve", "--host", "0.0.0.0", "--port", "2222"]
|
||||
depends_on:
|
||||
- api_server
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "${ONYX_SSH_PORT:-2222}:2222"
|
||||
environment:
|
||||
- ONYX_SERVER_URL=http://api_server:8080
|
||||
volumes:
|
||||
- cli_config:/home/onyx/.config
|
||||
logging:
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: "50m"
|
||||
max-file: "6"
|
||||
|
||||
volumes:
|
||||
# Necessary for persisting data for use
|
||||
db_volume:
|
||||
# Logs preserved across container restarts
|
||||
api_server_logs:
|
||||
# Shared volume for persistent document storage (Craft file-system mode)
|
||||
file-system:
|
||||
cli_config:
|
||||
422
deployment/docker_compose/headless/docker-compose.yml
Normal file
422
deployment/docker_compose/headless/docker-compose.yml
Normal file
@@ -0,0 +1,422 @@
|
||||
# =============================================================================
|
||||
# ONYX HEADLESS DOCKER COMPOSE (AUTO-GENERATED)
|
||||
# =============================================================================
|
||||
# This file is generated by generate_compose_variants.py from docker-compose.yml.
|
||||
# DO NOT EDIT THIS FILE DIRECTLY — your changes will be overwritten.
|
||||
#
|
||||
# To regenerate:
|
||||
# cd deployment/docker_compose
|
||||
# uv run generate_compose_variants.py headless
|
||||
#
|
||||
# Usage:
|
||||
# cd deployment/docker_compose/headless
|
||||
# docker compose up -d
|
||||
#
|
||||
# Connect via SSH:
|
||||
# ssh localhost -p 2222
|
||||
# =============================================================================
|
||||
|
||||
name: onyx
|
||||
|
||||
services:
|
||||
api_server:
|
||||
image: ${ONYX_BACKEND_IMAGE:-onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}}
|
||||
command: >
|
||||
/bin/sh -c "alembic upgrade head &&
|
||||
echo \"Starting Onyx Api Server\" &&
|
||||
uvicorn onyx.main:app --host 0.0.0.0 --port 8080"
|
||||
# Check env.template and copy to .env for env vars
|
||||
env_file:
|
||||
- path: .env
|
||||
required: false
|
||||
depends_on:
|
||||
relational_db:
|
||||
condition: service_started
|
||||
index:
|
||||
condition: service_started
|
||||
opensearch:
|
||||
condition: service_started
|
||||
required: false
|
||||
cache:
|
||||
condition: service_started
|
||||
inference_model_server:
|
||||
condition: service_started
|
||||
minio:
|
||||
condition: service_started
|
||||
required: false
|
||||
restart: unless-stopped
|
||||
# DEV: To expose ports, either:
|
||||
# 1. Use docker-compose.dev.yml: docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d --wait
|
||||
# 2. Uncomment the ports below
|
||||
# ports:
|
||||
# - "8080:8080"
|
||||
environment:
|
||||
# Auth Settings
|
||||
- AUTH_TYPE=${AUTH_TYPE:-basic}
|
||||
- FILE_STORE_BACKEND=${FILE_STORE_BACKEND:-s3}
|
||||
- POSTGRES_HOST=${POSTGRES_HOST:-relational_db}
|
||||
- VESPA_HOST=${VESPA_HOST:-index}
|
||||
- OPENSEARCH_HOST=${OPENSEARCH_HOST:-opensearch}
|
||||
- OPENSEARCH_ADMIN_PASSWORD=${OPENSEARCH_ADMIN_PASSWORD:-StrongPassword123!}
|
||||
- ENABLE_OPENSEARCH_INDEXING_FOR_ONYX=${OPENSEARCH_FOR_ONYX_ENABLED:-true}
|
||||
- REDIS_HOST=${REDIS_HOST:-cache}
|
||||
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
|
||||
- CODE_INTERPRETER_BASE_URL=${CODE_INTERPRETER_BASE_URL:-http://code-interpreter:8000}
|
||||
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
|
||||
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
|
||||
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
|
||||
# Onyx Craft configuration (disabled by default, set ENABLE_CRAFT=true in .env to enable)
|
||||
# Use --include-craft with install script, or manually set in .env file
|
||||
- ENABLE_CRAFT=${ENABLE_CRAFT:-false}
|
||||
- OUTPUTS_TEMPLATE_PATH=${OUTPUTS_TEMPLATE_PATH:-/app/onyx/server/features/build/sandbox/kubernetes/docker/templates/outputs}
|
||||
- VENV_TEMPLATE_PATH=${VENV_TEMPLATE_PATH:-/app/onyx/server/features/build/sandbox/kubernetes/docker/templates/venv}
|
||||
- WEB_TEMPLATE_PATH=${WEB_TEMPLATE_PATH:-/app/onyx/server/features/build/sandbox/kubernetes/docker/templates/outputs/web}
|
||||
- PERSISTENT_DOCUMENT_STORAGE_PATH=${PERSISTENT_DOCUMENT_STORAGE_PATH:-/app/file-system}
|
||||
# PRODUCTION: Uncomment the line below to use if IAM_AUTH is true and you are using iam auth for postgres
|
||||
# volumes:
|
||||
# - ./bundle.pem:/app/bundle.pem:ro
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
logging:
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: "50m"
|
||||
max-file: "6"
|
||||
healthcheck:
|
||||
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8080/health')"]
|
||||
interval: 30s
|
||||
timeout: 20s
|
||||
retries: 3
|
||||
start_period: 25s
|
||||
# Optional, only for debugging purposes
|
||||
volumes:
|
||||
- api_server_logs:/var/log/onyx
|
||||
# Shared volume for persistent document storage (Craft file-system mode)
|
||||
- file-system:/app/file-system
|
||||
|
||||
background:
|
||||
image: ${ONYX_BACKEND_IMAGE:-onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}}
|
||||
command: >
|
||||
/bin/sh -c "
|
||||
if [ -f /app/scripts/setup_craft_templates.sh ]; then
|
||||
/app/scripts/setup_craft_templates.sh;
|
||||
fi &&
|
||||
if [ -f /etc/ssl/certs/custom-ca.crt ]; then
|
||||
update-ca-certificates;
|
||||
fi &&
|
||||
/app/scripts/supervisord_entrypoint.sh"
|
||||
env_file:
|
||||
- path: .env
|
||||
required: false
|
||||
depends_on:
|
||||
relational_db:
|
||||
condition: service_started
|
||||
index:
|
||||
condition: service_started
|
||||
opensearch:
|
||||
condition: service_started
|
||||
required: false
|
||||
cache:
|
||||
condition: service_started
|
||||
inference_model_server:
|
||||
condition: service_started
|
||||
indexing_model_server:
|
||||
condition: service_started
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- FILE_STORE_BACKEND=${FILE_STORE_BACKEND:-s3}
|
||||
- POSTGRES_HOST=${POSTGRES_HOST:-relational_db}
|
||||
- VESPA_HOST=${VESPA_HOST:-index}
|
||||
- OPENSEARCH_HOST=${OPENSEARCH_HOST:-opensearch}
|
||||
- OPENSEARCH_ADMIN_PASSWORD=${OPENSEARCH_ADMIN_PASSWORD:-StrongPassword123!}
|
||||
- ENABLE_OPENSEARCH_INDEXING_FOR_ONYX=${OPENSEARCH_FOR_ONYX_ENABLED:-true}
|
||||
- REDIS_HOST=${REDIS_HOST:-cache}
|
||||
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
|
||||
- INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server}
|
||||
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL:-http://minio:9000}
|
||||
- S3_AWS_ACCESS_KEY_ID=${S3_AWS_ACCESS_KEY_ID:-minioadmin}
|
||||
- S3_AWS_SECRET_ACCESS_KEY=${S3_AWS_SECRET_ACCESS_KEY:-minioadmin}
|
||||
- DISCORD_BOT_TOKEN=${DISCORD_BOT_TOKEN:-}
|
||||
      - DISCORD_BOT_INVOKE_CHAR=${DISCORD_BOT_INVOKE_CHAR:-!}
      # API Server connection for Discord bot message processing
      - API_SERVER_PROTOCOL=${API_SERVER_PROTOCOL:-http}
      - API_SERVER_HOST=${API_SERVER_HOST:-api_server}
      # Onyx Craft configuration (set up automatically on container startup)
      - ENABLE_CRAFT=${ENABLE_CRAFT:-false}
      - OUTPUTS_TEMPLATE_PATH=${OUTPUTS_TEMPLATE_PATH:-/app/onyx/server/features/build/sandbox/kubernetes/docker/templates/outputs}
      - VENV_TEMPLATE_PATH=${VENV_TEMPLATE_PATH:-/app/onyx/server/features/build/sandbox/kubernetes/docker/templates/venv}
      - WEB_TEMPLATE_PATH=${WEB_TEMPLATE_PATH:-/app/onyx/server/features/build/sandbox/kubernetes/docker/templates/outputs/web}
      - PERSISTENT_DOCUMENT_STORAGE_PATH=${PERSISTENT_DOCUMENT_STORAGE_PATH:-/app/file-system}
    # PRODUCTION: Uncomment the line below to use if IAM_AUTH is true and you are using iam auth for postgres
    # volumes:
    #   - ./bundle.pem:/app/bundle.pem:ro
    extra_hosts:
      - "host.docker.internal:host-gateway"
    # Optional, only for debugging purposes
    volumes:
      - background_logs:/var/log/onyx
      # Shared volume for persistent document storage (Craft file-system mode)
      - file-system:/app/file-system
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"
    # PRODUCTION: Uncomment the following lines if you need to include a custom CA certificate
    # This section enables the use of a custom CA certificate
    # If present, the custom CA certificate is mounted as a volume
    # The container checks for its existence and updates the system's CA certificates
    # This allows for secure communication with services using custom SSL certificates
    # Optional volume mount for CA certificate
    # volumes:
    #   # Maps to the CA_CERT_PATH environment variable in the Dockerfile
    #   - ${CA_CERT_PATH:-./custom-ca.crt}:/etc/ssl/certs/custom-ca.crt:ro

  inference_model_server:
    image: ${ONYX_MODEL_SERVER_IMAGE:-onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}}
    command: >
      /bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
        echo 'Skipping service...';
        exit 0;
      else
        exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
      fi"
    env_file:
      - path: .env
        required: false
    restart: unless-stopped
    volumes:
      # Not necessary, this is just to reduce download time during startup
      - model_cache_huggingface:/app/.cache/huggingface/
      # Optional, only for debugging purposes
      - inference_model_server_logs:/var/log/onyx
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"
    healthcheck:
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:9000/api/health')"]
      interval: 20s
      timeout: 5s
      retries: 3

  indexing_model_server:
    image: ${ONYX_MODEL_SERVER_IMAGE:-onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}}
    command: >
      /bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-}\" = \"True\" ] || [ \"${DISABLE_MODEL_SERVER:-}\" = \"true\" ]; then
        echo 'Skipping service...';
        exit 0;
      else
        exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
      fi"
    env_file:
      - path: .env
        required: false
    restart: unless-stopped
    environment:
      - INDEXING_ONLY=True
    volumes:
      # Not necessary, this is just to reduce download time during startup
      - indexing_huggingface_model_cache:/app/.cache/huggingface/
      # Optional, only for debugging purposes
      - indexing_model_server_logs:/var/log/onyx
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"
    healthcheck:
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:9000/api/health')"]
      interval: 20s
      timeout: 5s
      retries: 3

  relational_db:
    image: postgres:15.2-alpine
    shm_size: 1g
    command: -c 'max_connections=250'
    env_file:
      - path: .env
        required: false
    restart: unless-stopped
    # PRODUCTION: Override the defaults by passing in the environment variables
    environment:
      - POSTGRES_USER=${POSTGRES_USER:-postgres}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-password}
    # DEV: To expose ports, either:
    # 1. Use docker-compose.dev.yml: docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d --wait
    # 2. Uncomment the ports below
    # ports:
    #   - "5432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres}"]
      interval: 10s
      timeout: 5s
      retries: 5
    volumes:
      - db_volume:/var/lib/postgresql/data

  # This container name cannot have an underscore in it due to Vespa expectations of the URL
  index:
    image: vespaengine/vespa:8.609.39
    restart: unless-stopped
    env_file:
      - path: .env
        required: false
    environment:
      - VESPA_SKIP_UPGRADE_CHECK=${VESPA_SKIP_UPGRADE_CHECK:-true}
    # DEV: To expose ports, either:
    # 1. Use docker-compose.dev.yml: docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d --wait
    # 2. Uncomment the ports below
    # ports:
    #   - "19071:19071"
    #   - "8081:8081"
    volumes:
      - vespa_volume:/opt/vespa/var
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  opensearch:
    image: opensearchproject/opensearch:3.4.0
    restart: unless-stopped
    # Controls whether this service runs. In order to enable it, add
    # opensearch-enabled to COMPOSE_PROFILES in the environment for this
    # docker-compose.
    # NOTE: Now enabled on by default. To explicitly disable this service,
    # uncomment this profile and ensure COMPOSE_PROFILES in your env does not
    # list the profile, or when running docker compose, include all desired
    # service names but this one. Additionally set
    # OPENSEARCH_FOR_ONYX_ENABLED=false in your env.
    # profiles: ["opensearch-enabled"]
    environment:
      # We need discovery.type=single-node so that OpenSearch doesn't try
      # forming a cluster and waiting for other nodes to become live.
      - discovery.type=single-node
      - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_ADMIN_PASSWORD:-StrongPassword123!}
      # This and the JVM config below come from the example in https://docs.opensearch.org/latest/install-and-configure/install-opensearch/docker/
      # We do this to avoid unstable performance from page swaps.
      - bootstrap.memory_lock=true # Disable JVM heap memory swapping.
      # Java heap should be ~50% of memory limit. For now we assume a limit of
      # 4g although in practice the container can request more than this.
      # See https://opster.com/guides/opensearch/opensearch-basics/opensearch-heap-size-usage-and-jvm-garbage-collection/
      # Xms is the starting size, Xmx is the maximum size. These should be the
      # same.
      - "OPENSEARCH_JAVA_OPTS=-Xms2g -Xmx2g"
    volumes:
      - opensearch-data:/usr/share/opensearch/data
    # These come from the example in https://docs.opensearch.org/latest/install-and-configure/install-opensearch/docker/
    ulimits:
      # Similarly to bootstrap.memory_lock, we don't want to impose limits on
      # how much memory a process can lock from being swapped.
      memlock:
        soft: -1 # Set memlock to unlimited (no soft or hard limit).
        hard: -1
      nofile:
        soft: 65536 # Maximum number of open files for the opensearch user - set to at least 65536.
        hard: 65536
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  cache:
    image: redis:7.4-alpine
    restart: unless-stopped
    # DEV: To expose ports, either:
    # 1. Use docker-compose.dev.yml: docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d --wait
    # 2. Uncomment the ports below
    # ports:
    #   - "6379:6379"
    # docker silently mounts /data even without an explicit volume mount, which enables
    # persistence. explicitly setting save and appendonly forces ephemeral behavior.
    command: redis-server --save "" --appendonly no
    env_file:
      - path: .env
        required: false
    # Use tmpfs to prevent creation of anonymous volumes for /data
    tmpfs:
      - /data

  minio:
    image: minio/minio:RELEASE.2025-07-23T15-54-02Z-cpuv1
    profiles: ["s3-filestore"]
    restart: unless-stopped
    # DEV: To expose ports, either:
    # 1. Use docker-compose.dev.yml: docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d --wait
    # 2. Uncomment the ports below
    # ports:
    #   - "9004:9000"
    #   - "9005:9001"
    env_file:
      - path: .env
        required: false
    environment:
      MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minioadmin}
      # Note: we've seen the default bucket creation logic not work in some cases
      MINIO_DEFAULT_BUCKETS: ${S3_FILE_STORE_BUCKET_NAME:-onyx-file-store-bucket}
    volumes:
      - minio_data:/data
    command: server /data --console-address ":9001"
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 30s
      timeout: 20s
      retries: 3

  code-interpreter:
    image: onyxdotapp/code-interpreter:${CODE_INTERPRETER_IMAGE_TAG:-latest}
    command: ["bash", "./entrypoint.sh", "code-interpreter-api"]
    restart: unless-stopped
    env_file:
      - path: .env
        required: false

    # Below is needed for the `docker-out-of-docker` execution mode
    # For Linux rootless Docker, set DOCKER_SOCK_PATH=${XDG_RUNTIME_DIR}/docker.sock
    user: root
    volumes:
      - ${DOCKER_SOCK_PATH:-/var/run/docker.sock}:/var/run/docker.sock

  cli_server:
    image: ${ONYX_CLI_IMAGE:-onyxdotapp/onyx-cli:${IMAGE_TAG:-latest}}
    command: ["serve", "--host", "0.0.0.0", "--port", "2222"]
    depends_on:
      - api_server
    restart: unless-stopped
    ports:
      - "${ONYX_SSH_PORT:-2222}:2222"
    environment:
      - ONYX_SERVER_URL=http://api_server:8080
    volumes:
      - cli_config:/home/onyx/.config
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

volumes:
  # Necessary for persisting data for use
  db_volume:
  vespa_volume: # Created by the container itself
  minio_data:
  # Caches to prevent re-downloading models, not strictly necessary
  model_cache_huggingface:
  indexing_huggingface_model_cache:
  # Logs preserved across container restarts
  api_server_logs:
  background_logs:
  # mcp_server_logs:
  inference_model_server_logs:
  indexing_model_server_logs:
  # Shared volume for persistent document storage (Craft file-system mode)
  file-system:
  # Persistent data for OpenSearch.
  opensearch-data:
  cli_config: