Compare commits

...

23 Commits

Author SHA1 Message Date
Jamison Lahman
b32e2fd304 chore(lint): run shellcheck in pre-commit 2026-04-09 23:22:47 +00:00
Jamison Lahman
4a96ef13d7 chore(devtools): devcontainer allows go and rust repos (#10041) 2026-04-09 15:46:50 -07:00
Jamison Lahman
822b0c99be chore(devtools): upgrade ods: 0.7.3->0.7.4 (#10039) 2026-04-09 14:44:56 -07:00
Jamison Lahman
bcf2851a85 chore(devtools): introduce a .devcontainer (#10035)
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-09 14:27:30 -07:00
Nikolas Garza
a5a59bd8f0 feat(helm): add API and heavy worker ServiceMonitors (#10025) 2026-04-09 21:03:27 +00:00
Nikolas Garza
32d2e7985a feat(slack-bot): make agent selector searchable (#10036) 2026-04-09 20:53:47 +00:00
Bo-Onyx
c4f8d5370b fix(helm): declare metrics port on celery-worker-heavy (#10033) 2026-04-09 18:29:31 +00:00
Nikolas Garza
9e434f6a5a fix(chat): set consistent 720px content width for chat and input bar (#10032) 2026-04-09 18:06:35 +00:00
Raunak Bhagat
67dc819319 refactor: consolidate LLM provider modal routing (#10030) 2026-04-09 18:02:43 +00:00
Nikolas Garza
2d12274050 feat(chat): add deselect preferred response with smooth transitions and scroll preservation (#10026) 2026-04-09 18:00:54 +00:00
Nikolas Garza
c727ba13ee feat(nrf): add ModelSelector and multi-model support to Chrome extension (#10023) 2026-04-09 16:43:40 +00:00
Jamison Lahman
6193dd5326 chore(python): simplify internal packages/workspace (#10029) 2026-04-09 09:32:19 -07:00
Nikolas Garza
387a7d1cea fix(chat): prevent popover flash when selecting 3rd model (#10021) 2026-04-09 15:52:12 +00:00
Nikolas Garza
869578eeed fix(chat): only collapse sidebar on multi-model submit (#10020) 2026-04-09 15:41:32 +00:00
Nikolas Garza
e68648ab74 fix(chat): gate ModelSelector render on agent and provider readiness (#10017) 2026-04-09 15:41:01 +00:00
Nikolas Garza
da01002099 fix(chat): center multi-model response panels in chat view (#10006) 2026-04-09 15:40:22 +00:00
Nikolas Garza
f5d66f389c fix(input): differentiate attach file and add model icons (#10024) 2026-04-09 03:30:10 +00:00
Nikolas Garza
82d89f78c6 fix(chat): resolve model selector showing stale model on agent switch (#10022) 2026-04-09 03:06:24 +00:00
Jamison Lahman
6f49c5e32c chore: update generic LLM configuration help copy (#10011) 2026-04-09 01:08:41 +00:00
Justin Tahara
41f2bd2f19 chore(edge): Skip edge tag (#10019) 2026-04-09 00:56:51 +00:00
Jamison Lahman
bfa2f672f9 fix: /api/admin/llm/built-in/options/custom 404 (#10009) 2026-04-08 17:47:13 -07:00
Justin Tahara
a823c3ead1 chore(ods): Bump from v0.7.2 -> v0.7.3 (#10018) 2026-04-09 00:30:22 +00:00
Justin Tahara
bd7d378a9a chore(python sandbox): Bump to v0.3.3 (#10016) 2026-04-09 00:10:19 +00:00
74 changed files with 2240 additions and 1325 deletions

65
.devcontainer/Dockerfile Normal file
View File

@@ -0,0 +1,65 @@
# Onyx devcontainer image. Base is pinned by digest for reproducible builds.
FROM ubuntu:26.04@sha256:cc925e589b7543b910fea57a240468940003fbfc0515245a495dd0ad8fe7cef1
# Base CLI tooling plus Node.js 20 (NodeSource), the Docker CLI/compose plugin,
# and the GitHub CLI (gh) apt repos — all in a single layer so apt metadata is
# fetched once and cleaned exactly once at the end.
# iptables/ipset/iproute2/dnsutils support init-firewall.sh; acl supports the
# setfacl path in init-dev-user.sh.
RUN apt-get update && apt-get install -y --no-install-recommends \
acl \
curl \
fd-find \
fzf \
git \
jq \
less \
make \
neovim \
openssh-client \
python3-venv \
ripgrep \
sudo \
ca-certificates \
iptables \
ipset \
iproute2 \
dnsutils \
unzip \
wget \
zsh \
&& curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \
&& apt-get install -y nodejs \
&& install -m 0755 -d /etc/apt/keyrings \
&& curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "$VERSION_CODENAME") stable" > /etc/apt/sources.list.d/docker.list \
&& curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg -o /etc/apt/keyrings/githubcli-archive-keyring.gpg \
&& chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" > /etc/apt/sources.list.d/github-cli.list \
&& apt-get update \
&& apt-get install -y --no-install-recommends docker-ce-cli docker-compose-plugin gh \
&& apt-get clean && rm -rf /var/lib/apt/lists/*
# fd-find installs as fdfind on Debian/Ubuntu — symlink to fd
RUN ln -sf "$(which fdfind)" /usr/local/bin/fd
# Install uv (Python package manager)
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /usr/local/bin/
# Create non-root dev user with passwordless sudo
# (init-dev-user.sh and init-firewall.sh run via `sudo` at container start).
RUN useradd -m -s /bin/zsh dev && \
echo "dev ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/dev && \
chmod 0440 /etc/sudoers.d/dev
# Marker so scripts/shells can detect they are running inside the devcontainer.
ENV DEVCONTAINER=true
# Workspace mount point; init-dev-user.sh re-checks ownership at runtime since
# the bind mount's host UID may differ from dev's.
RUN mkdir -p /workspace && \
chown -R dev:dev /workspace
WORKDIR /workspace
# Install Claude Code
ARG CLAUDE_CODE_VERSION=latest
RUN npm install -g @anthropic-ai/claude-code@${CLAUDE_CODE_VERSION}
# Configure zsh — source the repo-local zshrc so shell customization
# doesn't require an image rebuild.
RUN chsh -s /bin/zsh root && \
for rc in /root/.zshrc /home/dev/.zshrc; do \
echo '[ -f /workspace/.devcontainer/zshrc ] && . /workspace/.devcontainer/zshrc' >> "$rc"; \
done && \
chown dev:dev /home/dev/.zshrc

126
.devcontainer/README.md Normal file
View File

@@ -0,0 +1,126 @@
# Onyx Dev Container
A containerized development environment for working on Onyx.
## What's included
- Ubuntu 26.04 base image
- Node.js 20, uv, Claude Code
- Docker CLI, GitHub CLI (`gh`)
- Neovim, ripgrep, fd, fzf, jq, make, wget, unzip
- Zsh as default shell (sources host `~/.zshrc` if available)
- Python venv auto-activation
- Network firewall (default-deny; whitelists npm, PyPI, GitHub, Anthropic APIs, Sentry, VS Code update servers, and Go/Rust toolchain hosts)
## Usage
### VS Code
1. Install the [Dev Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers)
2. Open this repo in VS Code
3. "Reopen in Container" when prompted
### CLI (`ods dev`)
The [`ods` devtools CLI](../tools/ods/README.md) provides workspace-aware wrappers
for all devcontainer operations (also available as `ods dc`):
```bash
# Start the container
ods dev up
# Open a shell
ods dev into
# Run a command
ods dev exec npm test
# Stop the container
ods dev stop
```
If you don't have `ods` installed, use the `devcontainer` CLI directly:
```bash
npm install -g @devcontainers/cli
devcontainer up --workspace-folder .
devcontainer exec --workspace-folder . zsh
```
## Restarting the container
### VS Code
Open the Command Palette (`Ctrl+Shift+P` / `Cmd+Shift+P`) and run:
- **Dev Containers: Reopen in Container** — restarts the container without rebuilding
### CLI
```bash
# Restart the container
ods dev restart
# Pull the latest published image and recreate
ods dev rebuild
```
Or without `ods`:
```bash
devcontainer up --workspace-folder . --remove-existing-container
```
## Image
The devcontainer uses a prebuilt image published to `onyxdotapp/onyx-devcontainer`.
The tag is pinned in `devcontainer.json` — no local build is required.
To build the image locally (e.g. while iterating on the Dockerfile):
```bash
docker buildx bake devcontainer
```
The `devcontainer` target is defined in `docker-bake.hcl` at the repo root.
## User & permissions
The container runs as the `dev` user by default (`remoteUser` in devcontainer.json).
An init script (`init-dev-user.sh`) runs at container start to ensure `dev` has
read/write access to the bind-mounted workspace:
- **Standard Docker** — `dev`'s UID/GID is remapped to match the workspace owner,
so file permissions work seamlessly.
- **Rootless Docker** — The workspace appears as root-owned (UID 0) inside the
container due to user-namespace mapping. The init script grants `dev` access via
POSIX ACLs (`setfacl`), which adds a few seconds to the first container start on
large repos.
## Docker socket
The container mounts the host's Docker socket so you can run `docker` commands
from inside. `ods dev` auto-detects the socket path and sets `DOCKER_SOCK`:
| Environment | Socket path |
| ----------------------- | ------------------------------ |
| Linux (rootless Docker) | `$XDG_RUNTIME_DIR/docker.sock` |
| macOS (Docker Desktop) | `~/.docker/run/docker.sock` |
| Linux (standard Docker) | `/var/run/docker.sock` |
To override, set `DOCKER_SOCK` before running `ods dev up`. When using the
VS Code extension or `devcontainer` CLI directly (without `ods`), you must set
`DOCKER_SOCK` yourself.
## Firewall
The container starts with a default-deny firewall (`init-firewall.sh`) that only allows outbound traffic to:
- npm registry
- PyPI (`pypi.org`, `files.pythonhosted.org`)
- GitHub
- Anthropic API
- Sentry
- VS Code update servers
- Go toolchain/module hosts (`go.dev`, `storage.googleapis.com`)
- Rust toolchain (`static.rust-lang.org`)
This requires the `NET_ADMIN` and `NET_RAW` capabilities, which are added via `runArgs` in `devcontainer.json`.

View File

@@ -0,0 +1,22 @@
// Onyx devcontainer definition. This file is JSONC — the Dev Containers spec
// permits comments in devcontainer.json.
{
"name": "Onyx Dev Sandbox",
// Prebuilt image pinned by digest (built from .devcontainer/Dockerfile via the
// `devcontainer` target in docker-bake.hcl); no local build required.
"image": "onyxdotapp/onyx-devcontainer@sha256:12184169c5bcc9cca0388286d5ffe504b569bc9c37bfa631b76ee8eee2064055",
// NET_ADMIN/NET_RAW are required by init-firewall.sh (iptables + ipset).
"runArgs": ["--cap-add=NET_ADMIN", "--cap-add=NET_RAW"],
"mounts": [
// DOCKER_SOCK must be set in the host environment; `ods dev` auto-detects it.
"source=${localEnv:DOCKER_SOCK},target=/var/run/docker.sock,type=bind",
"source=${localEnv:HOME}/.claude,target=/home/dev/.claude,type=bind",
"source=${localEnv:HOME}/.claude.json,target=/home/dev/.claude.json,type=bind",
// Host configs are mounted readonly as *.host; init-dev-user.sh copies them
// into place so the dev user owns writable copies.
"source=${localEnv:HOME}/.zshrc,target=/home/dev/.zshrc.host,type=bind,readonly",
"source=${localEnv:HOME}/.gitconfig,target=/home/dev/.gitconfig.host,type=bind,readonly",
"source=${localEnv:HOME}/.ssh,target=/home/dev/.ssh.host,type=bind,readonly",
"source=${localEnv:HOME}/.config/nvim,target=/home/dev/.config/nvim.host,type=bind,readonly",
// Named volumes so caches/tool installs survive container recreation.
"source=onyx-devcontainer-cache,target=/home/dev/.cache,type=volume",
"source=onyx-devcontainer-local,target=/home/dev/.local,type=volume"
],
"remoteUser": "dev",
// UID remapping is handled by init-dev-user.sh instead of the devcontainer CLI.
"updateRemoteUserUID": false,
"workspaceMount": "source=${localWorkspaceFolder},target=/workspace,type=bind,consistency=delegated",
"workspaceFolder": "/workspace",
// Both init scripts run as root on every container start.
"postStartCommand": "sudo bash /workspace/.devcontainer/init-dev-user.sh && sudo bash /workspace/.devcontainer/init-firewall.sh",
"waitFor": "postStartCommand"
}

View File

@@ -0,0 +1,106 @@
#!/usr/bin/env bash
set -euo pipefail
# Remap the dev user's UID/GID to match the workspace owner so that
# bind-mounted files are accessible without running as root.
#
# Standard Docker: Workspace is owned by the host user's UID (e.g. 1000).
# We remap dev to that UID -- fast and seamless.
#
# Rootless Docker: Workspace appears as root-owned (UID 0) inside the
# container due to user-namespace mapping. We can't remap
# dev to UID 0 (that's root), so we grant access with
# POSIX ACLs instead.
#
# Runs as root at container start (postStartCommand in devcontainer.json).
WORKSPACE=/workspace
TARGET_USER=dev
# Ownership of the bind-mounted workspace vs. the dev account's current IDs.
WS_UID=$(stat -c '%u' "$WORKSPACE")
WS_GID=$(stat -c '%g' "$WORKSPACE")
DEV_UID=$(id -u "$TARGET_USER")
DEV_GID=$(id -g "$TARGET_USER")
DEV_HOME=/home/"$TARGET_USER"
# Ensure directories that tools expect exist under ~dev.
# ~/.local and ~/.cache are named Docker volumes -- ensure they are owned by dev.
mkdir -p "$DEV_HOME"/.local/state "$DEV_HOME"/.local/share
chown -R "$TARGET_USER":"$TARGET_USER" "$DEV_HOME"/.local
chown -R "$TARGET_USER":"$TARGET_USER" "$DEV_HOME"/.cache
# Copy host configs mounted as *.host into their real locations.
# This gives the dev user owned copies without touching host originals.
if [ -d "$DEV_HOME/.ssh.host" ]; then
cp -a "$DEV_HOME/.ssh.host" "$DEV_HOME/.ssh"
chmod 700 "$DEV_HOME/.ssh"
# Private keys must be 0600 for ssh to accept them; glob may match nothing.
chmod 600 "$DEV_HOME"/.ssh/id_* 2>/dev/null || true
chown -R "$TARGET_USER":"$TARGET_USER" "$DEV_HOME/.ssh"
fi
if [ -d "$DEV_HOME/.config/nvim.host" ]; then
mkdir -p "$DEV_HOME/.config"
cp -a "$DEV_HOME/.config/nvim.host" "$DEV_HOME/.config/nvim"
chown -R "$TARGET_USER":"$TARGET_USER" "$DEV_HOME/.config/nvim"
fi
# Already matching -- nothing to do.
if [ "$WS_UID" = "$DEV_UID" ] && [ "$WS_GID" = "$DEV_GID" ]; then
exit 0
fi
if [ "$WS_UID" != "0" ]; then
# ── Standard Docker ──────────────────────────────────────────────
# Workspace is owned by a non-root UID (the host user).
# Remap dev's UID/GID to match. Failures are warnings, not fatal:
# the container should still start even if remapping is refused.
if [ "$DEV_GID" != "$WS_GID" ]; then
if ! groupmod -g "$WS_GID" "$TARGET_USER" 2>&1; then
echo "warning: failed to remap $TARGET_USER GID to $WS_GID" >&2
fi
fi
if [ "$DEV_UID" != "$WS_UID" ]; then
if ! usermod -u "$WS_UID" -g "$WS_GID" "$TARGET_USER" 2>&1; then
echo "warning: failed to remap $TARGET_USER UID to $WS_UID" >&2
fi
fi
# Re-own the home dir so files created under the old UID stay writable.
if ! chown -R "$TARGET_USER":"$TARGET_USER" /home/"$TARGET_USER" 2>&1; then
echo "warning: failed to chown /home/$TARGET_USER" >&2
fi
else
# ── Rootless Docker ──────────────────────────────────────────────
# Workspace is root-owned inside the container. Grant dev access
# via POSIX ACLs (preserves ownership, works across the namespace
# boundary).
if command -v setfacl &>/dev/null; then
# rwX: execute bit only on directories / already-executable files.
setfacl -Rm "u:${TARGET_USER}:rwX" "$WORKSPACE"
setfacl -Rdm "u:${TARGET_USER}:rwX" "$WORKSPACE" # default ACL for new files
# Git refuses to operate in repos owned by a different UID.
# Host gitconfig is mounted readonly as ~/.gitconfig.host.
# Create a real ~/.gitconfig that includes it plus container overrides.
printf '[include]\n\tpath = %s/.gitconfig.host\n[safe]\n\tdirectory = %s\n' \
"$DEV_HOME" "$WORKSPACE" > "$DEV_HOME/.gitconfig"
chown "$TARGET_USER":"$TARGET_USER" "$DEV_HOME/.gitconfig"
# If this is a worktree, the main .git dir is bind-mounted at its
# host absolute path. Grant dev access so git operations work.
GIT_COMMON_DIR=$(git -C "$WORKSPACE" rev-parse --git-common-dir 2>/dev/null || true)
if [ -n "$GIT_COMMON_DIR" ] && [ "$GIT_COMMON_DIR" != "$WORKSPACE/.git" ]; then
# rev-parse may return a relative path — resolve against the workspace.
[ ! -d "$GIT_COMMON_DIR" ] && GIT_COMMON_DIR="$WORKSPACE/$GIT_COMMON_DIR"
if [ -d "$GIT_COMMON_DIR" ]; then
setfacl -Rm "u:${TARGET_USER}:rwX" "$GIT_COMMON_DIR"
setfacl -Rdm "u:${TARGET_USER}:rwX" "$GIT_COMMON_DIR"
git config -f "$DEV_HOME/.gitconfig" --add safe.directory "$(dirname "$GIT_COMMON_DIR")"
fi
fi
# Also fix bind-mounted dirs under ~dev that appear root-owned.
dir="/home/${TARGET_USER}/.claude"
if [ -d "$dir" ]; then
setfacl -Rm "u:${TARGET_USER}:rwX" "$dir" && setfacl -Rdm "u:${TARGET_USER}:rwX" "$dir"
fi
[ -f /home/"$TARGET_USER"/.claude.json ] && \
setfacl -m "u:${TARGET_USER}:rw" /home/"$TARGET_USER"/.claude.json
else
echo "warning: setfacl not found; dev user may not have write access to workspace" >&2
echo " install the 'acl' package or set remoteUser to root" >&2
fi
fi

104
.devcontainer/init-firewall.sh Executable file
View File

@@ -0,0 +1,104 @@
#!/usr/bin/env bash
set -euo pipefail
# Default-deny egress firewall for the devcontainer.
# Requires NET_ADMIN + NET_RAW (added via runArgs in devcontainer.json) and
# runs as root at container start. IPv4 only — no ip6tables rules are set.
echo "Setting up firewall..."
# Preserve docker dns resolution
# NOTE(review): the dots in the pattern are unescaped regex metacharacters, so
# this matches slightly more than the literal "127.0.0.11/32" — confirm
# intended (harmless in practice for Docker's embedded DNS rules).
DOCKER_DNS_RULES=$(iptables-save | grep -E "^-A.*-d 127.0.0.11/32" || true)
# Flush all rules
iptables -t nat -F
iptables -t nat -X
iptables -t mangle -F
iptables -t mangle -X
iptables -F
iptables -X
# Restore docker dns rules
# NOTE(review): iptables-restore normally expects "*table" / "COMMIT" framing;
# these are bare "-A" lines captured from iptables-save (possibly from the nat
# table) — verify this restores into the intended table on the target kernel.
if [ -n "$DOCKER_DNS_RULES" ]; then
echo "$DOCKER_DNS_RULES" | iptables-restore -n
fi
# Create ipset for allowed destinations
# "|| true": the set may already exist from a previous start; flush re-syncs it.
ipset create allowed-domains hash:net || true
ipset flush allowed-domains
# Fetch GitHub IP ranges (IPv4 only -- ipset hash:net and iptables are IPv4)
# NOTE(review): only the .api ranges from /meta are allowlisted — confirm the
# .git/.web ranges aren't also needed for git-over-HTTPS to github.com.
GITHUB_IPS=$(curl -s https://api.github.com/meta | jq -r '.api[]' 2>/dev/null | grep -v ':' || echo "")
for ip in $GITHUB_IPS; do
if ! ipset add allowed-domains "$ip" -exist 2>&1; then
echo "warning: failed to add GitHub IP $ip to allowlist" >&2
fi
done
# Resolve allowed domains
# npm, Anthropic, Sentry, VS Code updates, PyPI, Go and Rust toolchain hosts.
ALLOWED_DOMAINS=(
"registry.npmjs.org"
"api.anthropic.com"
"api-staging.anthropic.com"
"files.anthropic.com"
"sentry.io"
"update.code.visualstudio.com"
"pypi.org"
"files.pythonhosted.org"
"go.dev"
"storage.googleapis.com"
"static.rust-lang.org"
)
# Snapshot each domain's current A records into the ipset. NOTE(review): this
# is resolved once at container start — if a CDN rotates IPs later, traffic to
# the new addresses will be rejected until the firewall script reruns.
for domain in "${ALLOWED_DOMAINS[@]}"; do
IPS=$(getent ahosts "$domain" 2>/dev/null | awk '{print $1}' | grep -v ':' | sort -u || echo "")
for ip in $IPS; do
if ! ipset add allowed-domains "$ip/32" -exist 2>&1; then
echo "warning: failed to add $domain ($ip) to allowlist" >&2
fi
done
done
# Detect host network
# Allow the default gateway when talking to a host Docker daemon over a socket.
if [[ "${DOCKER_HOST:-}" == "unix://"* ]]; then
DOCKER_GATEWAY=$(ip -4 route show | grep "^default" | awk '{print $3}')
if ! ipset add allowed-domains "$DOCKER_GATEWAY/32" -exist 2>&1; then
echo "warning: failed to add Docker gateway $DOCKER_GATEWAY to allowlist" >&2
fi
fi
# Set default policies to DROP
iptables -P FORWARD DROP
iptables -P INPUT DROP
iptables -P OUTPUT DROP
# Allow established connections
iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
# Allow loopback
iptables -A INPUT -i lo -j ACCEPT
iptables -A OUTPUT -o lo -j ACCEPT
# Allow DNS
# NOTE(review): port 53 is open to ANY resolver, so DNS itself is not
# restricted to the allowlist — confirm this is an accepted exfil tradeoff.
iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 53 -j ACCEPT
# Allow outbound to allowed destinations
iptables -A OUTPUT -m set --match-set allowed-domains dst -j ACCEPT
# Reject unauthorized outbound
# REJECT (not DROP) so blocked tools fail fast instead of hanging on timeouts.
iptables -A OUTPUT -j REJECT --reject-with icmp-host-unreachable
# Validate firewall configuration
# Positive check: GitHub reachable; negative check: common sites rejected
# (ping uses ICMP echo on OUTPUT, which falls through to the REJECT rule).
echo "Validating firewall configuration..."
BLOCKED_SITES=("example.com" "google.com" "facebook.com")
for site in "${BLOCKED_SITES[@]}"; do
if timeout 2 ping -c 1 "$site" &>/dev/null; then
echo "Warning: $site is still reachable"
fi
done
if ! timeout 5 curl -s https://api.github.com/meta > /dev/null; then
echo "Warning: GitHub API is not accessible"
fi
echo "Firewall setup complete"

10
.devcontainer/zshrc Normal file
View File

@@ -0,0 +1,10 @@
# Devcontainer zshrc — sourced automatically for both root and dev users.
# Edit this file to customize the shell without rebuilding the image.
# (The image's /root/.zshrc and /home/dev/.zshrc source this file; see the
# Dockerfile's final RUN step.)
# Auto-activate Python venv
if [ -f /workspace/.venv/bin/activate ]; then
. /workspace/.venv/bin/activate
fi
# Source host zshrc if bind-mounted
# (~/.zshrc.host is a readonly bind mount declared in devcontainer.json.)
[ -f ~/.zshrc.host ] && . ~/.zshrc.host

View File

@@ -156,7 +156,7 @@ jobs:
check-version-tag:
runs-on: ubuntu-slim
timeout-minutes: 10
if: ${{ !startsWith(github.ref_name, 'nightly-latest') && github.event_name != 'workflow_dispatch' }}
if: ${{ !startsWith(github.ref_name, 'nightly-latest') && github.ref_name != 'edge' && github.event_name != 'workflow_dispatch' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # ratchet:actions/checkout@v6

View File

@@ -9,7 +9,6 @@ repos:
rev: d30b4298e4fb63ce8609e29acdbcf4c9018a483c
hooks:
- id: uv-sync
args: ["--locked", "--all-extras"]
- id: uv-lock
- id: uv-export
name: uv-export default.txt
@@ -18,7 +17,7 @@ repos:
"--no-emit-project",
"--no-default-groups",
"--no-hashes",
"--extra",
"--group",
"backend",
"-o",
"backend/requirements/default.txt",
@@ -31,7 +30,7 @@ repos:
"--no-emit-project",
"--no-default-groups",
"--no-hashes",
"--extra",
"--group",
"dev",
"-o",
"backend/requirements/dev.txt",
@@ -44,7 +43,7 @@ repos:
"--no-emit-project",
"--no-default-groups",
"--no-hashes",
"--extra",
"--group",
"ee",
"-o",
"backend/requirements/ee.txt",
@@ -57,7 +56,7 @@ repos:
"--no-emit-project",
"--no-default-groups",
"--no-hashes",
"--extra",
"--group",
"model_server",
"-o",
"backend/requirements/model_server.txt",
@@ -87,6 +86,17 @@ repos:
hooks:
- id: actionlint
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: 745eface02aef23e168a8afb6b5737818efbea95 # frozen: v0.11.0.1
hooks:
- id: shellcheck
exclude: >-
(?x)^(
backend/scripts/setup_craft_templates\.sh|
deployment/docker_compose/init-letsencrypt\.sh|
deployment/docker_compose/install\.sh
)$
- repo: https://github.com/psf/black
rev: 8a737e727ac5ab2f1d4cf5876720ed276dc8dc4b # frozen: 25.1.0
hooks:

3
.vscode/launch.json vendored
View File

@@ -531,8 +531,7 @@
"request": "launch",
"runtimeExecutable": "uv",
"runtimeArgs": [
"sync",
"--all-extras"
"sync"
],
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",

View File

@@ -117,7 +117,7 @@ If using PowerShell, the command slightly differs:
Install the required Python dependencies:
```bash
uv sync --all-extras
uv sync
```
Install Playwright for Python (headless browser required by the Web Connector):

View File

@@ -66,7 +66,7 @@ PROVIDER_DISPLAY_NAMES: dict[str, str] = {
LlmProviderNames.LM_STUDIO: "LM Studio",
LlmProviderNames.LITELLM_PROXY: "LiteLLM Proxy",
LlmProviderNames.BIFROST: "Bifrost",
LlmProviderNames.OPENAI_COMPATIBLE: "OpenAI Compatible",
LlmProviderNames.OPENAI_COMPATIBLE: "OpenAI-Compatible",
"groq": "Groq",
"anyscale": "Anyscale",
"deepseek": "DeepSeek",

View File

@@ -338,7 +338,7 @@ def get_provider_display_name(provider_name: str) -> str:
VERTEXAI_PROVIDER_NAME: "Google Vertex AI",
OPENROUTER_PROVIDER_NAME: "OpenRouter",
LITELLM_PROXY_PROVIDER_NAME: "LiteLLM Proxy",
OPENAI_COMPATIBLE_PROVIDER_NAME: "OpenAI Compatible",
OPENAI_COMPATIBLE_PROVIDER_NAME: "OpenAI-Compatible",
}
if provider_name in _ONYX_PROVIDER_DISPLAY_NAMES:

View File

@@ -1674,7 +1674,7 @@ def get_openai_compatible_server_available_models(
)
for r in sorted_results
],
source_label="OpenAI Compatible",
source_label="OpenAI-Compatible",
)
return sorted_results
@@ -1693,6 +1693,6 @@ def _get_openai_compatible_server_response(
return _get_openai_compatible_models_response(
url=url,
source_name="OpenAI Compatible",
source_name="OpenAI-Compatible",
api_key=api_key,
)

View File

@@ -1,10 +0,0 @@
[project]
name = "onyx-backend"
version = "0.0.0"
requires-python = ">=3.11"
dependencies = [
"onyx[backend,dev,ee]",
]
[tool.uv.sources]
onyx = { workspace = true }

View File

@@ -46,11 +46,11 @@ curl -LsSf https://astral.sh/uv/install.sh | sh
1. Edit `pyproject.toml`
2. Add/update/remove dependencies in the appropriate section:
- `[dependency-groups]` for dev tools
- `[project.dependencies]` for **shared** dependencies (used by both backend and model_server)
- `[project.optional-dependencies.backend]` for backend-only dependencies
- `[project.optional-dependencies.model_server]` for model_server-only dependencies (ML packages)
- `[project.optional-dependencies.ee]` for EE features
- `[dependency-groups.backend]` for backend-only dependencies
- `[dependency-groups.dev]` for dev tools
- `[dependency-groups.ee]` for EE features
- `[dependency-groups.model_server]` for model_server-only dependencies (ML packages)
3. Commit your changes - pre-commit hooks will automatically regenerate the lock file and requirements
### 3. Generating Lock File and Requirements
@@ -64,10 +64,10 @@ To manually regenerate:
```bash
uv lock
uv export --no-emit-project --no-default-groups --no-hashes --extra backend -o backend/requirements/default.txt
uv export --no-emit-project --no-default-groups --no-hashes --group backend -o backend/requirements/default.txt
uv export --no-emit-project --no-default-groups --no-hashes --group dev -o backend/requirements/dev.txt
uv export --no-emit-project --no-default-groups --no-hashes --extra ee -o backend/requirements/ee.txt
uv export --no-emit-project --no-default-groups --no-hashes --extra model_server -o backend/requirements/model_server.txt
uv export --no-emit-project --no-default-groups --no-hashes --group ee -o backend/requirements/ee.txt
uv export --no-emit-project --no-default-groups --no-hashes --group model_server -o backend/requirements/model_server.txt
```
### 4. Installing Dependencies
@@ -76,30 +76,14 @@ If enabled, all packages are installed automatically by the `uv-sync` pre-commit
branches or pulling new changes.
```bash
# For everything (most common)
uv sync --all-extras
# For development (most common) — installs shared + backend + dev + ee
uv sync
# For backend production (shared + backend dependencies)
uv sync --extra backend
# For backend development (shared + backend + dev tools)
uv sync --extra backend --extra dev
# For backend with EE (shared + backend + ee)
uv sync --extra backend --extra ee
# For backend production only (shared + backend dependencies)
uv sync --no-default-groups --group backend
# For model server (shared + model_server, NO backend deps!)
uv sync --extra model_server
```
`uv` aggressively [ignores active virtual environments](https://docs.astral.sh/uv/concepts/projects/config/#project-environment-path) and prefers the root virtual environment.
When working in workspace packages, be sure to pass `--active` when syncing the virtual environment:
```bash
cd backend/
source .venv/bin/activate
uv sync --active
uv run --active ...
uv sync --no-default-groups --group model_server
```
### 5. Upgrading Dependencies

View File

@@ -1,5 +1,5 @@
# This file was autogenerated by uv via the following command:
# uv export --no-emit-project --no-default-groups --no-hashes --extra backend -o backend/requirements/default.txt
# uv export --no-emit-project --no-default-groups --no-hashes --group backend -o backend/requirements/default.txt
agent-client-protocol==0.7.1
# via onyx
aioboto3==15.1.0
@@ -19,7 +19,6 @@ aiohttp==3.13.4
# aiobotocore
# discord-py
# litellm
# onyx
# voyageai
aioitertools==0.13.0
# via aiobotocore
@@ -28,7 +27,6 @@ aiolimiter==1.2.1
aiosignal==1.4.0
# via aiohttp
alembic==1.10.4
# via onyx
amqp==5.3.1
# via kombu
annotated-doc==0.0.4
@@ -51,13 +49,10 @@ argon2-cffi==23.1.0
argon2-cffi-bindings==25.1.0
# via argon2-cffi
asana==5.0.8
# via onyx
async-timeout==5.0.1 ; python_full_version < '3.11.3'
# via redis
asyncpg==0.30.0
# via onyx
atlassian-python-api==3.41.16
# via onyx
attrs==25.4.0
# via
# aiohttp
@@ -68,7 +63,6 @@ attrs==25.4.0
authlib==1.6.9
# via fastmcp
azure-cognitiveservices-speech==1.38.0
# via onyx
babel==2.17.0
# via courlan
backoff==2.2.1
@@ -86,7 +80,6 @@ beautifulsoup4==4.12.3
# atlassian-python-api
# markdownify
# markitdown
# onyx
# unstructured
billiard==4.2.3
# via celery
@@ -94,9 +87,7 @@ boto3==1.39.11
# via
# aiobotocore
# cohere
# onyx
boto3-stubs==1.39.11
# via onyx
botocore==1.39.11
# via
# aiobotocore
@@ -105,7 +96,6 @@ botocore==1.39.11
botocore-stubs==1.40.74
# via boto3-stubs
braintrust==0.3.9
# via onyx
brotli==1.2.0
# via onyx
bytecode==0.17.0
@@ -115,7 +105,6 @@ cachetools==6.2.2
caio==0.9.25
# via aiofile
celery==5.5.1
# via onyx
certifi==2025.11.12
# via
# asana
@@ -134,7 +123,6 @@ cffi==2.0.0
# pynacl
# zstandard
chardet==5.2.0
# via onyx
charset-normalizer==3.4.4
# via
# htmldate
@@ -146,7 +134,6 @@ charset-normalizer==3.4.4
chevron==0.14.0
# via braintrust
chonkie==1.0.10
# via onyx
claude-agent-sdk==0.1.19
# via onyx
click==8.3.1
@@ -201,15 +188,12 @@ cryptography==46.0.6
cyclopts==4.2.4
# via fastmcp
dask==2026.1.1
# via
# distributed
# onyx
# via distributed
dataclasses-json==0.6.7
# via unstructured
dateparser==1.2.2
# via htmldate
ddtrace==3.10.0
# via onyx
decorator==5.2.1
# via retry
defusedxml==0.7.1
@@ -223,7 +207,6 @@ deprecated==1.3.1
discord-py==2.4.0
# via onyx
distributed==2026.1.1
# via onyx
distro==1.9.0
# via
# openai
@@ -235,7 +218,6 @@ docstring-parser==0.17.0
docutils==0.22.3
# via rich-rst
dropbox==12.0.2
# via onyx
durationpy==0.10
# via kubernetes
email-validator==2.2.0
@@ -251,7 +233,6 @@ et-xmlfile==2.0.0
events==0.5
# via opensearch-py
exa-py==1.15.4
# via onyx
exceptiongroup==1.3.0
# via
# braintrust
@@ -262,23 +243,16 @@ fastapi==0.133.1
# fastapi-users
# onyx
fastapi-limiter==0.1.6
# via onyx
fastapi-users==15.0.4
# via
# fastapi-users-db-sqlalchemy
# onyx
# via fastapi-users-db-sqlalchemy
fastapi-users-db-sqlalchemy==7.0.0
# via onyx
fastavro==1.12.1
# via cohere
fastmcp==3.2.0
# via onyx
fastuuid==0.14.0
# via litellm
filelock==3.20.3
# via
# huggingface-hub
# onyx
# via huggingface-hub
filetype==1.2.0
# via unstructured
flatbuffers==25.9.23
@@ -298,7 +272,6 @@ gitpython==3.1.45
google-api-core==2.28.1
# via google-api-python-client
google-api-python-client==2.86.0
# via onyx
google-auth==2.48.0
# via
# google-api-core
@@ -308,11 +281,8 @@ google-auth==2.48.0
# google-genai
# kubernetes
google-auth-httplib2==0.1.0
# via
# google-api-python-client
# onyx
# via google-api-python-client
google-auth-oauthlib==1.0.0
# via onyx
google-genai==1.52.0
# via onyx
googleapis-common-protos==1.72.0
@@ -340,7 +310,6 @@ htmldate==1.9.1
httpcore==1.0.9
# via
# httpx
# onyx
# unstructured-client
httplib2==0.31.0
# via
@@ -357,21 +326,16 @@ httpx==0.28.1
# langsmith
# litellm
# mcp
# onyx
# openai
# unstructured-client
httpx-oauth==0.15.1
# via onyx
httpx-sse==0.4.3
# via
# cohere
# mcp
hubspot-api-client==11.1.0
# via onyx
huggingface-hub==0.35.3
# via
# onyx
# tokenizers
# via tokenizers
humanfriendly==10.0
# via coloredlogs
hyperframe==6.1.0
@@ -390,9 +354,7 @@ importlib-metadata==8.7.0
# litellm
# opentelemetry-api
inflection==0.5.1
# via
# onyx
# pyairtable
# via pyairtable
iniconfig==2.3.0
# via pytest
isodate==0.7.2
@@ -414,7 +376,6 @@ jinja2==3.1.6
# distributed
# litellm
jira==3.10.5
# via onyx
jiter==0.12.0
# via openai
jmespath==1.0.1
@@ -430,9 +391,7 @@ jsonpatch==1.33
jsonpointer==3.0.0
# via jsonpatch
jsonref==1.1.0
# via
# fastmcp
# onyx
# via fastmcp
jsonschema==4.25.1
# via
# litellm
@@ -450,15 +409,12 @@ kombu==5.5.4
kubernetes==31.0.0
# via onyx
langchain-core==1.2.22
# via onyx
langdetect==1.0.9
# via unstructured
langfuse==3.10.0
# via onyx
langsmith==0.3.45
# via langchain-core
lazy-imports==1.0.1
# via onyx
legacy-cgi==2.6.4 ; python_full_version >= '3.13'
# via ddtrace
litellm==1.81.6
@@ -473,7 +429,6 @@ lxml==5.3.0
# justext
# lxml-html-clean
# markitdown
# onyx
# python-docx
# python-pptx
# python3-saml
@@ -488,9 +443,7 @@ magika==0.6.3
makefun==1.16.0
# via fastapi-users
mako==1.2.4
# via
# alembic
# onyx
# via alembic
mammoth==1.11.0
# via markitdown
markdown-it-py==4.0.0
@@ -498,7 +451,6 @@ markdown-it-py==4.0.0
markdownify==1.2.2
# via markitdown
markitdown==0.1.2
# via onyx
markupsafe==3.0.3
# via
# jinja2
@@ -512,11 +464,9 @@ mcp==1.26.0
# via
# claude-agent-sdk
# fastmcp
# onyx
mdurl==0.1.2
# via markdown-it-py
mistune==3.2.0
# via onyx
more-itertools==10.8.0
# via
# jaraco-classes
@@ -525,13 +475,10 @@ more-itertools==10.8.0
mpmath==1.3.0
# via sympy
msal==1.34.0
# via
# office365-rest-python-client
# onyx
# via office365-rest-python-client
msgpack==1.1.2
# via distributed
msoffcrypto-tool==5.4.2
# via onyx
multidict==6.7.0
# via
# aiobotocore
@@ -548,7 +495,6 @@ mypy-extensions==1.0.0
# mypy
# typing-inspect
nest-asyncio==1.6.0
# via onyx
nltk==3.9.4
# via unstructured
numpy==2.4.1
@@ -563,10 +509,8 @@ oauthlib==3.2.2
# via
# atlassian-python-api
# kubernetes
# onyx
# requests-oauthlib
office365-rest-python-client==2.6.2
# via onyx
olefile==0.47
# via
# msoffcrypto-tool
@@ -582,15 +526,11 @@ openai==2.14.0
openapi-pydantic==0.5.1
# via fastmcp
openinference-instrumentation==0.1.42
# via onyx
openinference-semantic-conventions==0.1.25
# via openinference-instrumentation
openpyxl==3.0.10
# via
# markitdown
# onyx
# via markitdown
opensearch-py==3.0.0
# via onyx
opentelemetry-api==1.39.1
# via
# ddtrace
@@ -606,7 +546,6 @@ opentelemetry-exporter-otlp-proto-http==1.39.1
# via langfuse
opentelemetry-proto==1.39.1
# via
# onyx
# opentelemetry-exporter-otlp-proto-common
# opentelemetry-exporter-otlp-proto-http
opentelemetry-sdk==1.39.1
@@ -640,7 +579,6 @@ parameterized==0.9.0
partd==1.4.2
# via dask
passlib==1.7.4
# via onyx
pathable==0.4.4
# via jsonschema-path
pdfminer-six==20251107
@@ -652,9 +590,7 @@ platformdirs==4.5.0
# fastmcp
# zeep
playwright==1.55.0
# via
# onyx
# pytest-playwright
# via pytest-playwright
pluggy==1.6.0
# via pytest
ply==3.11
@@ -684,12 +620,9 @@ protobuf==6.33.5
psutil==7.1.3
# via
# distributed
# onyx
# unstructured
psycopg2-binary==2.9.9
# via onyx
puremagic==1.28
# via onyx
pwdlib==0.3.0
# via fastapi-users
py==1.11.0
@@ -697,7 +630,6 @@ py==1.11.0
py-key-value-aio==0.4.4
# via fastmcp
pyairtable==3.0.1
# via onyx
pyasn1==0.6.3
# via
# pyasn1-modules
@@ -707,7 +639,6 @@ pyasn1-modules==0.4.2
pycparser==2.23 ; implementation_name != 'PyPy'
# via cffi
pycryptodome==3.19.1
# via onyx
pydantic==2.11.7
# via
# agent-client-protocol
@@ -734,7 +665,6 @@ pydantic-settings==2.12.0
pyee==13.0.0
# via playwright
pygithub==2.5.0
# via onyx
pygments==2.20.0
# via rich
pyjwt==2.12.0
@@ -745,17 +675,13 @@ pyjwt==2.12.0
# pygithub
# simple-salesforce
pympler==1.1
# via onyx
pynacl==1.6.2
# via pygithub
pypandoc-binary==1.16.2
# via onyx
pyparsing==3.2.5
# via httplib2
pypdf==6.9.2
# via
# onyx
# unstructured-client
# via unstructured-client
pyperclip==1.11.0
# via fastmcp
pyreadline3==3.5.4 ; sys_platform == 'win32'
@@ -768,9 +694,7 @@ pytest==8.3.5
pytest-base-url==2.1.0
# via pytest-playwright
pytest-mock==3.12.0
# via onyx
pytest-playwright==0.7.0
# via onyx
python-dateutil==2.8.2
# via
# aiobotocore
@@ -781,11 +705,9 @@ python-dateutil==2.8.2
# htmldate
# hubspot-api-client
# kubernetes
# onyx
# opensearch-py
# pandas
python-docx==1.1.2
# via onyx
python-dotenv==1.1.1
# via
# braintrust
@@ -793,10 +715,8 @@ python-dotenv==1.1.1
# litellm
# magika
# mcp
# onyx
# pydantic-settings
python-gitlab==5.6.0
# via onyx
python-http-client==3.3.7
# via sendgrid
python-iso639==2025.11.16
@@ -807,19 +727,15 @@ python-multipart==0.0.22
# via
# fastapi-users
# mcp
# onyx
python-oxmsg==0.0.2
# via unstructured
python-pptx==0.6.23
# via
# markitdown
# onyx
# via markitdown
python-slugify==8.0.4
# via
# braintrust
# pytest-playwright
python3-saml==1.15.0
# via onyx
pytz==2025.2
# via
# dateparser
@@ -827,7 +743,6 @@ pytz==2025.2
# pandas
# zeep
pywikibot==9.0.0
# via onyx
pywin32==311 ; sys_platform == 'win32'
# via
# mcp
@@ -844,13 +759,9 @@ pyyaml==6.0.3
# kubernetes
# langchain-core
rapidfuzz==3.13.0
# via
# onyx
# unstructured
# via unstructured
redis==5.0.8
# via
# fastapi-limiter
# onyx
# via fastapi-limiter
referencing==0.36.2
# via
# jsonschema
@@ -881,7 +792,6 @@ requests==2.33.0
# matrix-client
# msal
# office365-rest-python-client
# onyx
# opensearch-py
# opentelemetry-exporter-otlp-proto-http
# pyairtable
@@ -907,7 +817,6 @@ requests-oauthlib==1.3.1
# google-auth-oauthlib
# jira
# kubernetes
# onyx
requests-toolbelt==1.0.0
# via
# jira
@@ -918,7 +827,6 @@ requests-toolbelt==1.0.0
retry==0.9.2
# via onyx
rfc3986==1.5.0
# via onyx
rich==14.2.0
# via
# cyclopts
@@ -938,15 +846,12 @@ s3transfer==0.13.1
secretstorage==3.5.0 ; sys_platform == 'linux'
# via keyring
sendgrid==6.12.5
# via onyx
sentry-sdk==2.14.0
# via onyx
shapely==2.0.6
# via onyx
shellingham==1.5.4
# via typer
simple-salesforce==1.12.6
# via onyx
six==1.17.0
# via
# asana
@@ -961,7 +866,6 @@ six==1.17.0
# python-dateutil
# stone
slack-sdk==3.20.2
# via onyx
smmap==5.0.2
# via gitdb
sniffio==1.3.1
@@ -976,7 +880,6 @@ sqlalchemy==2.0.15
# via
# alembic
# fastapi-users-db-sqlalchemy
# onyx
sse-starlette==3.0.3
# via mcp
sseclient-py==1.8.0
@@ -985,14 +888,11 @@ starlette==0.49.3
# via
# fastapi
# mcp
# onyx
# prometheus-fastapi-instrumentator
stone==3.3.1
# via dropbox
stripe==10.12.0
# via onyx
supervisor==4.3.0
# via onyx
sympy==1.14.0
# via onnxruntime
tblib==3.2.2
@@ -1005,11 +905,8 @@ tenacity==9.1.2
text-unidecode==1.3
# via python-slugify
tiktoken==0.7.0
# via
# litellm
# onyx
# via litellm
timeago==1.0.16
# via onyx
tld==0.13.1
# via courlan
tokenizers==0.21.4
@@ -1033,13 +930,11 @@ tqdm==4.67.1
# openai
# unstructured
trafilatura==1.12.2
# via onyx
typer==0.20.0
# via mcp
types-awscrt==0.28.4
# via botocore-stubs
types-openpyxl==3.0.4.7
# via onyx
types-requests==2.32.0.20250328
# via cohere
types-s3transfer==0.14.0
@@ -1105,11 +1000,8 @@ tzlocal==5.3.1
uncalled-for==0.2.0
# via fastmcp
unstructured==0.18.27
# via onyx
unstructured-client==0.42.6
# via
# onyx
# unstructured
# via unstructured
uritemplate==4.2.0
# via google-api-python-client
urllib3==2.6.3
@@ -1121,7 +1013,6 @@ urllib3==2.6.3
# htmldate
# hubspot-api-client
# kubernetes
# onyx
# opensearch-py
# pyairtable
# pygithub
@@ -1171,9 +1062,7 @@ xlrd==2.0.2
xlsxwriter==3.2.9
# via python-pptx
xmlsec==1.3.14
# via
# onyx
# python3-saml
# via python3-saml
xmltodict==1.0.2
# via ddtrace
yarl==1.22.0
@@ -1187,4 +1076,3 @@ zipp==3.23.0
zstandard==0.23.0
# via langsmith
zulip==0.8.2
# via onyx

View File

@@ -1,5 +1,5 @@
# This file was autogenerated by uv via the following command:
# uv export --no-emit-project --no-default-groups --no-hashes --extra dev -o backend/requirements/dev.txt
# uv export --no-emit-project --no-default-groups --no-hashes --group dev -o backend/requirements/dev.txt
agent-client-protocol==0.7.1
# via onyx
aioboto3==15.1.0
@@ -47,7 +47,6 @@ attrs==25.4.0
# jsonschema
# referencing
black==25.1.0
# via onyx
boto3==1.39.11
# via
# aiobotocore
@@ -60,7 +59,6 @@ botocore==1.39.11
brotli==1.2.0
# via onyx
celery-types==0.19.0
# via onyx
certifi==2025.11.12
# via
# httpcore
@@ -122,7 +120,6 @@ execnet==2.1.2
executing==2.2.1
# via stack-data
faker==40.1.2
# via onyx
fastapi==0.133.1
# via
# onyx
@@ -156,7 +153,6 @@ h11==0.16.0
# httpcore
# uvicorn
hatchling==1.28.0
# via onyx
hf-xet==1.2.0 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
# via huggingface-hub
httpcore==1.0.9
@@ -187,7 +183,6 @@ importlib-metadata==8.7.0
iniconfig==2.3.0
# via pytest
ipykernel==6.29.5
# via onyx
ipython==9.7.0
# via ipykernel
ipython-pygments-lexers==1.1.1
@@ -224,13 +219,11 @@ litellm==1.81.6
mako==1.2.4
# via alembic
manygo==0.2.0
# via onyx
markupsafe==3.0.3
# via
# jinja2
# mako
matplotlib==3.10.8
# via onyx
matplotlib-inline==0.2.1
# via
# ipykernel
@@ -243,12 +236,10 @@ multidict==6.7.0
# aiohttp
# yarl
mypy==1.13.0
# via onyx
mypy-extensions==1.0.0
# via
# black
# mypy
# onyx
nest-asyncio==1.6.0
# via ipykernel
nodeenv==1.9.1
@@ -263,16 +254,13 @@ oauthlib==3.2.2
# via
# kubernetes
# requests-oauthlib
onyx-devtools==0.7.2
# via onyx
onyx-devtools==0.7.4
openai==2.14.0
# via
# litellm
# onyx
openapi-generator-cli==7.17.0
# via
# onyx
# onyx-devtools
# via onyx-devtools
packaging==24.2
# via
# black
@@ -282,7 +270,6 @@ packaging==24.2
# matplotlib
# pytest
pandas-stubs==2.3.3.251201
# via onyx
parameterized==0.9.0
# via cohere
parso==0.8.5
@@ -305,7 +292,6 @@ pluggy==1.6.0
# hatchling
# pytest
pre-commit==3.2.2
# via onyx
prometheus-client==0.23.1
# via
# onyx
@@ -359,22 +345,16 @@ pyparsing==3.2.5
# via matplotlib
pytest==8.3.5
# via
# onyx
# pytest-alembic
# pytest-asyncio
# pytest-dotenv
# pytest-repeat
# pytest-xdist
pytest-alembic==0.12.1
# via onyx
pytest-asyncio==1.3.0
# via onyx
pytest-dotenv==0.5.2
# via onyx
pytest-repeat==0.9.4
# via onyx
pytest-xdist==3.8.0
# via onyx
python-dateutil==2.8.2
# via
# aiobotocore
@@ -407,9 +387,7 @@ referencing==0.36.2
regex==2025.11.3
# via tiktoken
release-tag==0.5.2
# via onyx
reorder-python-imports-black==3.14.0
# via onyx
requests==2.33.0
# via
# cohere
@@ -430,7 +408,6 @@ rpds-py==0.29.0
rsa==4.9.1
# via google-auth
ruff==0.12.0
# via onyx
s3transfer==0.13.1
# via boto3
sentry-sdk==2.14.0
@@ -484,39 +461,22 @@ traitlets==5.14.3
trove-classifiers==2025.12.1.14
# via hatchling
types-beautifulsoup4==4.12.0.3
# via onyx
types-html5lib==1.1.11.13
# via
# onyx
# types-beautifulsoup4
# via types-beautifulsoup4
types-oauthlib==3.2.0.9
# via onyx
types-passlib==1.7.7.20240106
# via onyx
types-pillow==10.2.0.20240822
# via onyx
types-psutil==7.1.3.20251125
# via onyx
types-psycopg2==2.9.21.10
# via onyx
types-python-dateutil==2.8.19.13
# via onyx
types-pytz==2023.3.1.1
# via
# onyx
# pandas-stubs
# via pandas-stubs
types-pyyaml==6.0.12.11
# via onyx
types-regex==2023.3.23.1
# via onyx
types-requests==2.32.0.20250328
# via
# cohere
# onyx
# via cohere
types-retry==0.9.9.3
# via onyx
types-setuptools==68.0.0.3
# via onyx
typing-extensions==4.15.0
# via
# aiosignal
@@ -574,4 +534,3 @@ yarl==1.22.0
zipp==3.23.0
# via importlib-metadata
zizmor==1.18.0
# via onyx

View File

@@ -1,5 +1,5 @@
# This file was autogenerated by uv via the following command:
# uv export --no-emit-project --no-default-groups --no-hashes --extra ee -o backend/requirements/ee.txt
# uv export --no-emit-project --no-default-groups --no-hashes --group ee -o backend/requirements/ee.txt
agent-client-protocol==0.7.1
# via onyx
aioboto3==15.1.0
@@ -182,7 +182,6 @@ packaging==24.2
parameterized==0.9.0
# via cohere
posthog==3.7.4
# via onyx
prometheus-client==0.23.1
# via
# onyx

View File

@@ -1,7 +1,6 @@
# This file was autogenerated by uv via the following command:
# uv export --no-emit-project --no-default-groups --no-hashes --extra model_server -o backend/requirements/model_server.txt
# uv export --no-emit-project --no-default-groups --no-hashes --group model_server -o backend/requirements/model_server.txt
accelerate==1.6.0
# via onyx
agent-client-protocol==0.7.1
# via onyx
aioboto3==15.1.0
@@ -105,7 +104,6 @@ distro==1.9.0
durationpy==0.10
# via kubernetes
einops==0.8.1
# via onyx
fastapi==0.133.1
# via
# onyx
@@ -207,7 +205,6 @@ networkx==3.5
numpy==2.4.1
# via
# accelerate
# onyx
# scikit-learn
# scipy
# transformers
@@ -363,7 +360,6 @@ s3transfer==0.13.1
safetensors==0.5.3
# via
# accelerate
# onyx
# transformers
scikit-learn==1.7.2
# via sentence-transformers
@@ -372,7 +368,6 @@ scipy==1.16.3
# scikit-learn
# sentence-transformers
sentence-transformers==4.0.2
# via onyx
sentry-sdk==2.14.0
# via onyx
setuptools==80.9.0 ; python_full_version >= '3.12'
@@ -411,7 +406,6 @@ tokenizers==0.21.4
torch==2.9.1
# via
# accelerate
# onyx
# sentence-transformers
tqdm==4.67.1
# via
@@ -420,9 +414,7 @@ tqdm==4.67.1
# sentence-transformers
# transformers
transformers==4.53.0
# via
# onyx
# sentence-transformers
# via sentence-transformers
triton==3.5.1 ; platform_machine == 'x86_64' and sys_platform == 'linux'
# via torch
types-requests==2.32.0.20250328

View File

@@ -46,7 +46,7 @@ stop_and_remove_containers
# Start the PostgreSQL container with optional volume
echo "Starting PostgreSQL container..."
if [[ -n "$POSTGRES_VOLUME" ]]; then
docker run -p 5432:5432 --name onyx_postgres -e POSTGRES_PASSWORD=password -d -v $POSTGRES_VOLUME:/var/lib/postgresql/data postgres -c max_connections=250
docker run -p 5432:5432 --name onyx_postgres -e POSTGRES_PASSWORD=password -d -v "$POSTGRES_VOLUME":/var/lib/postgresql/data postgres -c max_connections=250
else
docker run -p 5432:5432 --name onyx_postgres -e POSTGRES_PASSWORD=password -d postgres -c max_connections=250
fi
@@ -54,7 +54,7 @@ fi
# Start the Vespa container with optional volume
echo "Starting Vespa container..."
if [[ -n "$VESPA_VOLUME" ]]; then
docker run --detach --name onyx_vespa --hostname vespa-container --publish 8081:8081 --publish 19071:19071 -v $VESPA_VOLUME:/opt/vespa/var vespaengine/vespa:8
docker run --detach --name onyx_vespa --hostname vespa-container --publish 8081:8081 --publish 19071:19071 -v "$VESPA_VOLUME":/opt/vespa/var vespaengine/vespa:8
else
docker run --detach --name onyx_vespa --hostname vespa-container --publish 8081:8081 --publish 19071:19071 vespaengine/vespa:8
fi
@@ -85,7 +85,7 @@ docker compose -f "$COMPOSE_FILE" -f "$COMPOSE_DEV_FILE" --profile opensearch-en
# Start the Redis container with optional volume
echo "Starting Redis container..."
if [[ -n "$REDIS_VOLUME" ]]; then
docker run --detach --name onyx_redis --publish 6379:6379 -v $REDIS_VOLUME:/data redis
docker run --detach --name onyx_redis --publish 6379:6379 -v "$REDIS_VOLUME":/data redis
else
docker run --detach --name onyx_redis --publish 6379:6379 redis
fi
@@ -93,7 +93,7 @@ fi
# Start the MinIO container with optional volume
echo "Starting MinIO container..."
if [[ -n "$MINIO_VOLUME" ]]; then
docker run --detach --name onyx_minio --publish 9004:9000 --publish 9005:9001 -e MINIO_ROOT_USER=minioadmin -e MINIO_ROOT_PASSWORD=minioadmin -v $MINIO_VOLUME:/data minio/minio server /data --console-address ":9001"
docker run --detach --name onyx_minio --publish 9004:9000 --publish 9005:9001 -e MINIO_ROOT_USER=minioadmin -e MINIO_ROOT_PASSWORD=minioadmin -v "$MINIO_VOLUME":/data minio/minio server /data --console-address ":9001"
else
docker run --detach --name onyx_minio --publish 9004:9000 --publish 9005:9001 -e MINIO_ROOT_USER=minioadmin -e MINIO_ROOT_PASSWORD=minioadmin minio/minio server /data --console-address ":9001"
fi
@@ -111,6 +111,7 @@ sleep 1
# Alembic should be configured in the virtualenv for this repo
if [[ -f "../.venv/bin/activate" ]]; then
# shellcheck source=/dev/null
source ../.venv/bin/activate
else
echo "Warning: Python virtual environment not found at .venv/bin/activate; alembic may not work."

View File

@@ -58,8 +58,7 @@ SERVICE_ORDER=(
validate_template() {
local template_file=$1
echo "Validating template: $template_file..."
aws cloudformation validate-template --template-body file://"$template_file" --region "$AWS_REGION" > /dev/null
if [ $? -ne 0 ]; then
if ! aws cloudformation validate-template --template-body file://"$template_file" --region "$AWS_REGION" > /dev/null; then
echo "Error: Validation failed for $template_file. Exiting."
exit 1
fi
@@ -108,13 +107,15 @@ deploy_stack() {
fi
# Create temporary parameters file for this template
local temp_params_file=$(create_parameters_from_json "$template_file")
local temp_params_file
temp_params_file=$(create_parameters_from_json "$template_file")
# Special handling for SubnetIDs parameter if needed
if grep -q "SubnetIDs" "$template_file"; then
echo "Template uses SubnetIDs parameter, ensuring it's properly formatted..."
# Make sure we're passing SubnetIDs as a comma-separated list
local subnet_ids=$(remove_comments "$CONFIG_FILE" | jq -r '.SubnetIDs // empty')
local subnet_ids
subnet_ids=$(remove_comments "$CONFIG_FILE" | jq -r '.SubnetIDs // empty')
if [ -n "$subnet_ids" ]; then
echo "Using SubnetIDs from config: $subnet_ids"
else
@@ -123,15 +124,13 @@ deploy_stack() {
fi
echo "Deploying stack: $stack_name with template: $template_file and generated config from: $CONFIG_FILE..."
aws cloudformation deploy \
if ! aws cloudformation deploy \
--stack-name "$stack_name" \
--template-file "$template_file" \
--parameter-overrides file://"$temp_params_file" \
--capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM CAPABILITY_AUTO_EXPAND \
--region "$AWS_REGION" \
--no-cli-auto-prompt > /dev/null
if [ $? -ne 0 ]; then
--no-cli-auto-prompt > /dev/null; then
echo "Error: Deployment failed for $stack_name. Exiting."
exit 1
fi

View File

@@ -52,11 +52,9 @@ delete_stack() {
--region "$AWS_REGION"
echo "Waiting for stack $stack_name to be deleted..."
aws cloudformation wait stack-delete-complete \
if aws cloudformation wait stack-delete-complete \
--stack-name "$stack_name" \
--region "$AWS_REGION"
if [ $? -eq 0 ]; then
--region "$AWS_REGION"; then
echo "Stack $stack_name deleted successfully."
sleep 10
else

View File

@@ -1,3 +1,4 @@
#!/bin/sh
# fill in the template
export ONYX_BACKEND_API_HOST="${ONYX_BACKEND_API_HOST:-api_server}"
export ONYX_WEB_SERVER_HOST="${ONYX_WEB_SERVER_HOST:-web_server}"
@@ -16,12 +17,15 @@ echo "Using web server host: $ONYX_WEB_SERVER_HOST"
echo "Using MCP server host: $ONYX_MCP_SERVER_HOST"
echo "Using nginx proxy timeouts - connect: ${NGINX_PROXY_CONNECT_TIMEOUT}s, send: ${NGINX_PROXY_SEND_TIMEOUT}s, read: ${NGINX_PROXY_READ_TIMEOUT}s"
# shellcheck disable=SC2016
envsubst '$DOMAIN $SSL_CERT_FILE_NAME $SSL_CERT_KEY_FILE_NAME $ONYX_BACKEND_API_HOST $ONYX_WEB_SERVER_HOST $ONYX_MCP_SERVER_HOST $NGINX_PROXY_CONNECT_TIMEOUT $NGINX_PROXY_SEND_TIMEOUT $NGINX_PROXY_READ_TIMEOUT' < "/etc/nginx/conf.d/$1" > /etc/nginx/conf.d/app.conf
# Conditionally create MCP server configuration
if [ "${MCP_SERVER_ENABLED}" = "True" ] || [ "${MCP_SERVER_ENABLED}" = "true" ]; then
echo "MCP server is enabled, creating MCP configuration..."
# shellcheck disable=SC2016
envsubst '$ONYX_MCP_SERVER_HOST' < "/etc/nginx/conf.d/mcp_upstream.conf.inc.template" > /etc/nginx/conf.d/mcp_upstream.conf.inc
# shellcheck disable=SC2016
envsubst '$ONYX_MCP_SERVER_HOST' < "/etc/nginx/conf.d/mcp.conf.inc.template" > /etc/nginx/conf.d/mcp.conf.inc
else
echo "MCP server is disabled, removing MCP configuration..."

View File

@@ -19,6 +19,6 @@ dependencies:
version: 5.4.0
- name: code-interpreter
repository: https://onyx-dot-app.github.io/python-sandbox/
version: 0.3.2
digest: sha256:74908ea45ace2b4be913ff762772e6d87e40bab64e92c6662aa51730eaeb9d87
generated: "2026-04-06T15:34:02.597166-07:00"
version: 0.3.3
digest: sha256:a57f29088b1624a72f6c70e4c3ccc2f2aad675e4624278c4e9be92083d6d5dad
generated: "2026-04-08T16:47:29.33368-07:00"

View File

@@ -5,7 +5,7 @@ home: https://www.onyx.app/
sources:
- "https://github.com/onyx-dot-app/onyx"
type: application
version: 0.4.40
version: 0.4.41
appVersion: latest
annotations:
category: Productivity
@@ -45,6 +45,6 @@ dependencies:
repository: https://charts.min.io/
condition: minio.enabled
- name: code-interpreter
version: 0.3.2
version: 0.3.3
repository: https://onyx-dot-app.github.io/python-sandbox/
condition: codeInterpreter.enabled

View File

@@ -0,0 +1,23 @@
{{- if .Values.monitoring.serviceMonitors.enabled }}
# Prometheus Operator ServiceMonitor for the API server.
# Selects Services in the release namespace carrying the API deployment's
# app label, and scrapes /metrics on the "api-server-port" port every 30s.
# Requires the Prometheus Operator CRDs to be installed; gated behind
# .Values.monitoring.serviceMonitors.enabled.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "onyx.fullname" . }}-api
  labels:
    {{- include "onyx.labels" . | nindent 4 }}
    # Extra labels (e.g. to match a Prometheus CR's serviceMonitorSelector).
    {{- with .Values.monitoring.serviceMonitors.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  namespaceSelector:
    matchNames:
      - {{ .Release.Namespace }}
  selector:
    matchLabels:
      app: {{ .Values.api.deploymentLabels.app }}
  endpoints:
    - port: api-server-port
      path: /metrics
      interval: 30s
      scrapeTimeout: 10s
{{- end }}

View File

@@ -70,6 +70,10 @@ spec:
"-Q",
"connector_pruning,connector_doc_permissions_sync,connector_external_group_sync,csv_generation,sandbox",
]
ports:
- name: metrics
containerPort: 9094
protocol: TCP
resources:
{{- toYaml .Values.celery_worker_heavy.resources | nindent 12 }}
envFrom:

View File

@@ -74,4 +74,29 @@ spec:
interval: 30s
scrapeTimeout: 10s
{{- end }}
{{- if gt (int .Values.celery_worker_heavy.replicaCount) 0 }}
---
# Prometheus Operator ServiceMonitor for the heavy Celery worker.
# Only rendered when at least one heavy-worker replica is configured.
# Selects Services carrying both the worker's app label and metrics: "true",
# and scrapes /metrics on the "metrics" port every 30s.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "onyx.fullname" . }}-celery-worker-heavy
  labels:
    {{- include "onyx.labels" . | nindent 4 }}
    # Extra labels (e.g. to match a Prometheus CR's serviceMonitorSelector).
    {{- with .Values.monitoring.serviceMonitors.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  namespaceSelector:
    matchNames:
      - {{ .Release.Namespace }}
  selector:
    matchLabels:
      app: {{ .Values.celery_worker_heavy.deploymentLabels.app }}
      metrics: "true"
  endpoints:
    - port: metrics
      path: /metrics
      interval: 30s
      scrapeTimeout: 10s
{{- end }}
{{- end }}

View File

@@ -264,7 +264,7 @@ monitoring:
# The sidecar must be configured with label selector: grafana_dashboard=1
enabled: false
serviceMonitors:
# -- Set to true to deploy ServiceMonitor resources for Celery worker metrics endpoints.
# -- Set to true to deploy ServiceMonitor resources for API server and Celery worker metrics endpoints.
# Requires the Prometheus Operator CRDs (included in kube-prometheus-stack).
# Use `labels` to match your Prometheus CR's serviceMonitorSelector (e.g. release: onyx-monitoring).
enabled: false

View File

@@ -22,6 +22,10 @@ variable "CLI_REPOSITORY" {
default = "onyxdotapp/onyx-cli"
}
variable "DEVCONTAINER_REPOSITORY" {
default = "onyxdotapp/onyx-devcontainer"
}
variable "TAG" {
default = "latest"
}
@@ -90,3 +94,16 @@ target "cli" {
tags = ["${CLI_REPOSITORY}:${TAG}"]
}
# Buildx bake target for the published devcontainer image
# (onyxdotapp/onyx-devcontainer). Seeds the build cache from the latest/edge
# registry tags and embeds inline cache metadata in the pushed image.
target "devcontainer" {
  context    = ".devcontainer"
  dockerfile = "Dockerfile"
  cache-from = [
    "type=registry,ref=${DEVCONTAINER_REPOSITORY}:latest",
    "type=registry,ref=${DEVCONTAINER_REPOSITORY}:edge",
  ]
  cache-to = ["type=inline"]
  tags     = ["${DEVCONTAINER_REPOSITORY}:${TAG}"]
}

View File

@@ -28,7 +28,7 @@ dependencies = [
"kubernetes>=31.0.0",
]
[project.optional-dependencies]
[dependency-groups]
# Main backend application dependencies
backend = [
"aiohttp==3.13.4",
@@ -148,7 +148,7 @@ dev = [
"matplotlib==3.10.8",
"mypy-extensions==1.0.0",
"mypy==1.13.0",
"onyx-devtools==0.7.2",
"onyx-devtools==0.7.4",
"openapi-generator-cli==7.17.0",
"pandas-stubs~=2.3.3",
"pre-commit==3.2.2",
@@ -195,6 +195,9 @@ model_server = [
"sentry-sdk[fastapi,celery,starlette]==2.14.0",
]
[tool.uv]
default-groups = ["backend", "dev", "ee", "model_server"]
[tool.mypy]
plugins = "sqlalchemy.ext.mypy.plugin"
mypy_path = "backend"
@@ -230,7 +233,7 @@ follow_imports = "skip"
ignore_errors = true
[tool.uv.workspace]
members = ["backend", "tools/ods"]
members = ["tools/ods"]
[tool.basedpyright]
include = ["backend"]

View File

@@ -244,6 +244,54 @@ ods web lint
ods web test --watch
```
### `dev` - Devcontainer Management
Manage the Onyx devcontainer. Also available as `ods dc`.
Requires the [devcontainer CLI](https://github.com/devcontainers/cli) (`npm install -g @devcontainers/cli`).
```shell
ods dev <subcommand>
```
**Subcommands:**
- `up` - Start the devcontainer (pulls the image if needed)
- `into` - Open a zsh shell inside the running devcontainer
- `exec` - Run an arbitrary command inside the devcontainer
- `restart` - Remove and recreate the devcontainer
- `rebuild` - Pull the latest published image and recreate
- `stop` - Stop the running devcontainer
The devcontainer image is published to `onyxdotapp/onyx-devcontainer` and
referenced by tag in `.devcontainer/devcontainer.json` — no local build needed.
**Examples:**
```shell
# Start the devcontainer
ods dev up
# Open a shell
ods dev into
# Run a command
ods dev exec -- npm test
# Restart the container
ods dev restart
# Pull latest image and recreate
ods dev rebuild
# Stop the container
ods dev stop
# Same commands work with the dc alias
ods dc up
ods dc into
```
### `db` - Database Administration
Manage PostgreSQL database dumps, restores, and migrations.

34
tools/ods/cmd/dev.go Normal file
View File

@@ -0,0 +1,34 @@
package cmd
import (
"github.com/spf13/cobra"
)
// NewDevCommand creates the parent dev command for devcontainer operations.
// It is registered on the root command and is reachable as both "dev" and
// the shorter "dc" alias; all real work happens in the subcommands attached
// below.
func NewDevCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:     "dev",
		Aliases: []string{"dc"},
		Short:   "Manage the devcontainer",
		Long: `Manage the Onyx devcontainer.
Wraps the devcontainer CLI with workspace-aware defaults.
Commands:
up Start the devcontainer
into Open a shell inside the running devcontainer
exec Run a command inside the devcontainer
restart Remove and recreate the devcontainer
rebuild Pull the latest image and recreate
stop Stop the running devcontainer`,
	}
	// One subcommand per devcontainer lifecycle operation.
	cmd.AddCommand(newDevUpCommand())
	cmd.AddCommand(newDevIntoCommand())
	cmd.AddCommand(newDevExecCommand())
	cmd.AddCommand(newDevRestartCommand())
	cmd.AddCommand(newDevRebuildCommand())
	cmd.AddCommand(newDevStopCommand())
	return cmd
}

29
tools/ods/cmd/dev_exec.go Normal file
View File

@@ -0,0 +1,29 @@
package cmd
import (
"github.com/spf13/cobra"
)
// newDevExecCommand builds the "dev exec" subcommand, which forwards an
// arbitrary command line into the running devcontainer via runDevExec.
// Flag parsing is disabled so container-bound flags (e.g. -it) pass through
// verbatim instead of being interpreted by cobra.
func newDevExecCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "exec [--] <command> [args...]",
		Short: "Run a command inside the devcontainer",
		Long: `Run an arbitrary command inside the running devcontainer.
All arguments are treated as positional (flags like -it are passed through).
Examples:
ods dev exec npm test
ods dev exec -- ls -la
ods dev exec -it echo hello`,
		Args:               cobra.MinimumNArgs(1),
		DisableFlagParsing: true,
		Run: func(cmd *cobra.Command, args []string) {
			// Drop an explicit "--" separator if the user supplied one.
			if len(args) > 0 && args[0] == "--" {
				args = args[1:]
			}
			// Fix: a bare "ods dev exec --" satisfies MinimumNArgs(1) but
			// leaves no command once the separator is stripped; previously
			// this invoked "devcontainer exec" with nothing to run. Show
			// usage instead.
			if len(args) == 0 {
				_ = cmd.Help()
				return
			}
			runDevExec(args)
		},
	}
	return cmd
}

51
tools/ods/cmd/dev_into.go Normal file
View File

@@ -0,0 +1,51 @@
package cmd
import (
"os"
"os/exec"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/onyx-dot-app/onyx/tools/ods/internal/paths"
)
// newDevIntoCommand builds the "dev into" subcommand: an interactive zsh
// shell inside the already-running devcontainer (thin wrapper over
// runDevExec with a fixed command).
func newDevIntoCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "into",
		Short: "Open a shell inside the running devcontainer",
		Long: `Open an interactive zsh shell inside the running devcontainer.
Examples:
ods dev into`,
		Run: func(cmd *cobra.Command, args []string) {
			// Delegate to the shared exec path with zsh as the command.
			runDevExec([]string{"zsh"})
		},
	}
	return cmd
}
// runDevExec executes "devcontainer exec --workspace-folder <root> <command...>".
// The child process inherits stdin/stdout/stderr so interactive commands
// (shells, TUIs) work; any failure is fatal.
func runDevExec(command []string) {
	checkDevcontainerCLI()
	root, err := paths.GitRoot()
	if err != nil {
		log.Fatalf("Failed to find git root: %v", err)
	}
	cliArgs := append([]string{"exec", "--workspace-folder", root}, command...)
	log.Debugf("Running: devcontainer %v", cliArgs)
	child := exec.Command("devcontainer", cliArgs...)
	child.Stdin = os.Stdin
	child.Stdout = os.Stdout
	child.Stderr = os.Stderr
	if runErr := child.Run(); runErr != nil {
		log.Fatalf("devcontainer exec failed: %v", runErr)
	}
}

View File

@@ -0,0 +1,41 @@
package cmd
import (
"os"
"os/exec"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// newDevRebuildCommand builds the "dev rebuild" subcommand, which pulls the
// latest published devcontainer image and recreates the container (see
// runDevRebuild).
func newDevRebuildCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "rebuild",
		Short: "Pull the latest devcontainer image and recreate",
		Long: `Pull the latest devcontainer image and recreate the container.
Use after the published image has been updated or after changing devcontainer.json.
Examples:
ods dev rebuild`,
		Run: func(cmd *cobra.Command, args []string) {
			runDevRebuild()
		},
	}
	return cmd
}
// runDevRebuild pulls the image named in devcontainer.json, then recreates
// the container. A failed pull is only a warning — the locally cached image
// is used instead.
func runDevRebuild() {
	image := devcontainerImage()
	log.Infof("Pulling %s...", image)
	pullCmd := exec.Command("docker", "pull", image)
	pullCmd.Stdout = os.Stdout
	pullCmd.Stderr = os.Stderr
	if pullErr := pullCmd.Run(); pullErr != nil {
		log.Warnf("Failed to pull image (continuing with local copy): %v", pullErr)
	}
	runDevcontainer("up", []string{"--remove-existing-container"})
}

View File

@@ -0,0 +1,23 @@
package cmd
import (
"github.com/spf13/cobra"
)
// newDevRestartCommand builds the "dev restart" subcommand: remove the
// existing container and recreate it from the cached image (no pull —
// contrast with "dev rebuild").
func newDevRestartCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "restart",
		Short: "Remove and recreate the devcontainer",
		Long: `Remove the existing devcontainer and recreate it.
Uses the cached image — for a full image rebuild, use "ods dev rebuild".
Examples:
ods dev restart`,
		Run: func(cmd *cobra.Command, args []string) {
			runDevcontainer("up", []string{"--remove-existing-container"})
		},
	}
	return cmd
}

56
tools/ods/cmd/dev_stop.go Normal file
View File

@@ -0,0 +1,56 @@
package cmd
import (
"os/exec"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/onyx-dot-app/onyx/tools/ods/internal/paths"
)
// newDevStopCommand builds the "dev stop" subcommand, which stops the
// running devcontainer for this workspace (see runDevStop).
func newDevStopCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "stop",
		Short: "Stop the running devcontainer",
		Long: `Stop the running devcontainer.
Examples:
ods dev stop`,
		Run: func(cmd *cobra.Command, args []string) {
			runDevStop()
		},
	}
	return cmd
}
// runDevStop stops the devcontainer associated with this workspace, if one
// is running. The container is located via the devcontainer.local_folder
// label that the devcontainer CLI stamps on containers it creates; finding
// no container is not an error.
func runDevStop() {
	root, err := paths.GitRoot()
	if err != nil {
		log.Fatalf("Failed to find git root: %v", err)
	}
	// Find the container by the devcontainer label
	psOut, psErr := exec.Command(
		"docker", "ps", "-q",
		"--filter", "label=devcontainer.local_folder="+root,
	).Output()
	if psErr != nil {
		log.Fatalf("Failed to find devcontainer: %v", psErr)
	}
	id := strings.TrimSpace(string(psOut))
	if id == "" {
		log.Info("No running devcontainer found")
		return
	}
	log.Infof("Stopping devcontainer %s...", id)
	if stopErr := exec.Command("docker", "stop", id).Run(); stopErr != nil {
		log.Fatalf("Failed to stop devcontainer: %v", stopErr)
	}
	log.Info("Devcontainer stopped")
}

177
tools/ods/cmd/dev_up.go Normal file
View File

@@ -0,0 +1,177 @@
package cmd
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/onyx-dot-app/onyx/tools/ods/internal/paths"
)
// newDevUpCommand builds the "dev up" subcommand, which starts (and creates
// if necessary) the devcontainer via the shared runDevcontainer helper.
func newDevUpCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "up",
		Short: "Start the devcontainer",
		Long: `Start the devcontainer, pulling the image if needed.
Examples:
ods dev up`,
		Run: func(cmd *cobra.Command, args []string) {
			runDevcontainer("up", nil)
		},
	}
	return cmd
}
// devcontainerImage reads the image field from .devcontainer/devcontainer.json.
// Any failure (missing file, bad JSON, missing field) is fatal, since the
// callers cannot proceed without knowing which image to pull.
// NOTE(review): devcontainer.json is often JSONC (comments/trailing commas);
// encoding/json would reject such a file — confirm the checked-in file is
// strict JSON.
func devcontainerImage() string {
	root, err := paths.GitRoot()
	if err != nil {
		log.Fatalf("Failed to find git root: %v", err)
	}
	cfgPath := filepath.Join(root, ".devcontainer", "devcontainer.json")
	raw, readErr := os.ReadFile(cfgPath)
	if readErr != nil {
		log.Fatalf("Failed to read devcontainer.json: %v", readErr)
	}
	cfg := struct {
		Image string `json:"image"`
	}{}
	if err := json.Unmarshal(raw, &cfg); err != nil {
		log.Fatalf("Failed to parse devcontainer.json: %v", err)
	}
	if cfg.Image == "" {
		log.Fatal("No image field in devcontainer.json")
	}
	return cfg.Image
}
// checkDevcontainerCLI ensures the devcontainer CLI is installed.
// Exits with an install hint when the binary is not on PATH.
func checkDevcontainerCLI() {
	_, lookErr := exec.LookPath("devcontainer")
	if lookErr != nil {
		log.Fatal("devcontainer CLI is not installed. Install it with: npm install -g @devcontainers/cli")
	}
}
// ensureDockerSock sets the DOCKER_SOCK environment variable if not already set.
// devcontainer.json references ${localEnv:DOCKER_SOCK} for the socket mount.
// An explicit pre-set value always wins over detection.
func ensureDockerSock() {
	if os.Getenv("DOCKER_SOCK") != "" {
		return
	}
	if setErr := os.Setenv("DOCKER_SOCK", detectDockerSock()); setErr != nil {
		log.Fatalf("Failed to set DOCKER_SOCK: %v", setErr)
	}
}
// detectDockerSock returns the path to the Docker socket on the host.
// Resolution order: DOCKER_HOST (unix:// prefix stripped, bare paths taken
// as-is), Linux rootless socket under XDG_RUNTIME_DIR, macOS Docker Desktop
// socket under the user's home, then the conventional /var/run/docker.sock.
func detectDockerSock() string {
	// Prefer explicit DOCKER_HOST (strip unix:// prefix if present).
	if dh := os.Getenv("DOCKER_HOST"); dh != "" {
		if stripped := strings.TrimPrefix(dh, "unix://"); stripped != dh && stripped != "" {
			return stripped
		}
		// Only bare paths (starting with /) are valid socket paths.
		// Non-unix schemes (e.g. tcp://) can't be bind-mounted.
		if strings.HasPrefix(dh, "/") {
			return dh
		}
		log.Warnf("DOCKER_HOST=%q is not a unix socket path; falling back to local socket detection", dh)
	}
	exists := func(p string) bool {
		_, statErr := os.Stat(p)
		return statErr == nil
	}
	switch runtime.GOOS {
	case "linux":
		// Linux rootless Docker: $XDG_RUNTIME_DIR/docker.sock
		if xdg := os.Getenv("XDG_RUNTIME_DIR"); xdg != "" {
			if sock := filepath.Join(xdg, "docker.sock"); exists(sock) {
				return sock
			}
		}
	case "darwin":
		// macOS Docker Desktop: ~/.docker/run/docker.sock
		if home, homeErr := os.UserHomeDir(); homeErr == nil {
			if sock := filepath.Join(home, ".docker", "run", "docker.sock"); exists(sock) {
				return sock
			}
		}
	}
	// Fallback: standard socket path (Linux with standard Docker, macOS symlink)
	return "/var/run/docker.sock"
}
// worktreeGitMount returns a --mount flag value that makes a git worktree's
// .git reference resolve inside the container. In a worktree, .git is a file
// containing "gitdir: /path/to/main/.git/worktrees/<name>", so we need the
// main repo's .git directory to exist at the same absolute host path inside
// the container.
//
// Returns ("", false) when the workspace is not a worktree.
func worktreeGitMount(root string) (string, bool) {
	dotgit := filepath.Join(root, ".git")
	info, err := os.Lstat(dotgit)
	if err != nil || info.IsDir() {
		return "", false // regular repo or no .git
	}
	// .git is a file — parse the gitdir path.
	out, err := exec.Command("git", "-C", root, "rev-parse", "--git-common-dir").Output()
	if err != nil {
		log.Warnf("Failed to detect git common dir: %v", err)
		return "", false
	}
	commonDir := strings.TrimSpace(string(out))
	// Resolve to absolute path.
	if !filepath.IsAbs(commonDir) {
		commonDir = filepath.Join(root, commonDir)
	}
	// Fix: the previous code discarded EvalSymlinks' error, and on failure
	// (e.g. a component disappearing mid-resolution) overwrote commonDir
	// with "", yielding a broken "type=bind,source=,target=" mount. Keep the
	// unresolved path when resolution fails.
	if resolved, evalErr := filepath.EvalSymlinks(commonDir); evalErr == nil {
		commonDir = resolved
	} else {
		log.Warnf("Failed to resolve symlinks for %s (using unresolved path): %v", commonDir, evalErr)
	}
	mount := fmt.Sprintf("type=bind,source=%s,target=%s", commonDir, commonDir)
	log.Debugf("Worktree detected — mounting main .git: %s", commonDir)
	return mount, true
}
// runDevcontainer executes "devcontainer <action> --workspace-folder <root> [extraArgs...]".
// Ensures the CLI is installed and DOCKER_SOCK is populated first, adds a
// worktree .git mount when the workspace is a git worktree, and wires the
// child to this process's stdio so interactive output is visible. Failure is
// fatal.
func runDevcontainer(action string, extraArgs []string) {
	checkDevcontainerCLI()
	ensureDockerSock()
	root, err := paths.GitRoot()
	if err != nil {
		log.Fatalf("Failed to find git root: %v", err)
	}
	cliArgs := []string{action, "--workspace-folder", root}
	if mount, ok := worktreeGitMount(root); ok {
		cliArgs = append(cliArgs, "--mount", mount)
	}
	cliArgs = append(cliArgs, extraArgs...)
	log.Debugf("Running: devcontainer %v", cliArgs)
	child := exec.Command("devcontainer", cliArgs...)
	child.Stdin = os.Stdin
	child.Stdout = os.Stdout
	child.Stderr = os.Stderr
	if runErr := child.Run(); runErr != nil {
		log.Fatalf("devcontainer %s failed: %v", action, runErr)
	}
}

View File

@@ -53,6 +53,7 @@ func NewRootCommand() *cobra.Command {
cmd.AddCommand(NewRunCICommand())
cmd.AddCommand(NewScreenshotDiffCommand())
cmd.AddCommand(NewDesktopCommand())
cmd.AddCommand(NewDevCommand())
cmd.AddCommand(NewWebCommand())
cmd.AddCommand(NewLatestStableTagCommand())
cmd.AddCommand(NewWhoisCommand())

324
uv.lock generated
View File

@@ -14,12 +14,6 @@ resolution-markers = [
"python_full_version < '3.12' and sys_platform != 'win32'",
]
[manifest]
members = [
"onyx",
"onyx-backend",
]
[[package]]
name = "accelerate"
version = "1.6.0"
@@ -4234,7 +4228,7 @@ dependencies = [
{ name = "voyageai" },
]
[package.optional-dependencies]
[package.dev-dependencies]
backend = [
{ name = "aiohttp" },
{ name = "alembic" },
@@ -4388,195 +4382,191 @@ model-server = [
[package.metadata]
requires-dist = [
{ name = "accelerate", marker = "extra == 'model-server'", specifier = "==1.6.0" },
{ name = "agent-client-protocol", specifier = ">=0.7.1" },
{ name = "aioboto3", specifier = "==15.1.0" },
{ name = "aiohttp", marker = "extra == 'backend'", specifier = "==3.13.4" },
{ name = "alembic", marker = "extra == 'backend'", specifier = "==1.10.4" },
{ name = "asana", marker = "extra == 'backend'", specifier = "==5.0.8" },
{ name = "asyncpg", marker = "extra == 'backend'", specifier = "==0.30.0" },
{ name = "atlassian-python-api", marker = "extra == 'backend'", specifier = "==3.41.16" },
{ name = "azure-cognitiveservices-speech", marker = "extra == 'backend'", specifier = "==1.38.0" },
{ name = "beautifulsoup4", marker = "extra == 'backend'", specifier = "==4.12.3" },
{ name = "black", marker = "extra == 'dev'", specifier = "==25.1.0" },
{ name = "boto3", marker = "extra == 'backend'", specifier = "==1.39.11" },
{ name = "boto3-stubs", extras = ["s3"], marker = "extra == 'backend'", specifier = "==1.39.11" },
{ name = "braintrust", marker = "extra == 'backend'", specifier = "==0.3.9" },
{ name = "brotli", specifier = ">=1.2.0" },
{ name = "celery", marker = "extra == 'backend'", specifier = "==5.5.1" },
{ name = "celery-types", marker = "extra == 'dev'", specifier = "==0.19.0" },
{ name = "chardet", marker = "extra == 'backend'", specifier = "==5.2.0" },
{ name = "chonkie", marker = "extra == 'backend'", specifier = "==1.0.10" },
{ name = "claude-agent-sdk", specifier = ">=0.1.19" },
{ name = "cohere", specifier = "==5.6.1" },
{ name = "dask", marker = "extra == 'backend'", specifier = "==2026.1.1" },
{ name = "ddtrace", marker = "extra == 'backend'", specifier = "==3.10.0" },
{ name = "discord-py", specifier = "==2.4.0" },
{ name = "discord-py", marker = "extra == 'backend'", specifier = "==2.4.0" },
{ name = "distributed", marker = "extra == 'backend'", specifier = "==2026.1.1" },
{ name = "dropbox", marker = "extra == 'backend'", specifier = "==12.0.2" },
{ name = "einops", marker = "extra == 'model-server'", specifier = "==0.8.1" },
{ name = "exa-py", marker = "extra == 'backend'", specifier = "==1.15.4" },
{ name = "faker", marker = "extra == 'dev'", specifier = "==40.1.2" },
{ name = "fastapi", specifier = "==0.133.1" },
{ name = "fastapi-limiter", marker = "extra == 'backend'", specifier = "==0.1.6" },
{ name = "fastapi-users", marker = "extra == 'backend'", specifier = "==15.0.4" },
{ name = "fastapi-users-db-sqlalchemy", marker = "extra == 'backend'", specifier = "==7.0.0" },
{ name = "fastmcp", marker = "extra == 'backend'", specifier = "==3.2.0" },
{ name = "filelock", marker = "extra == 'backend'", specifier = "==3.20.3" },
{ name = "google-api-python-client", marker = "extra == 'backend'", specifier = "==2.86.0" },
{ name = "google-auth-httplib2", marker = "extra == 'backend'", specifier = "==0.1.0" },
{ name = "google-auth-oauthlib", marker = "extra == 'backend'", specifier = "==1.0.0" },
{ name = "google-genai", specifier = "==1.52.0" },
{ name = "hatchling", marker = "extra == 'dev'", specifier = "==1.28.0" },
{ name = "httpcore", marker = "extra == 'backend'", specifier = "==1.0.9" },
{ name = "httpx", extras = ["http2"], marker = "extra == 'backend'", specifier = "==0.28.1" },
{ name = "httpx-oauth", marker = "extra == 'backend'", specifier = "==0.15.1" },
{ name = "hubspot-api-client", marker = "extra == 'backend'", specifier = "==11.1.0" },
{ name = "huggingface-hub", marker = "extra == 'backend'", specifier = "==0.35.3" },
{ name = "inflection", marker = "extra == 'backend'", specifier = "==0.5.1" },
{ name = "ipykernel", marker = "extra == 'dev'", specifier = "==6.29.5" },
{ name = "jira", marker = "extra == 'backend'", specifier = "==3.10.5" },
{ name = "jsonref", marker = "extra == 'backend'", specifier = "==1.1.0" },
{ name = "kubernetes", specifier = ">=31.0.0" },
{ name = "kubernetes", marker = "extra == 'backend'", specifier = "==31.0.0" },
{ name = "langchain-core", marker = "extra == 'backend'", specifier = "==1.2.22" },
{ name = "langfuse", marker = "extra == 'backend'", specifier = "==3.10.0" },
{ name = "lazy-imports", marker = "extra == 'backend'", specifier = "==1.0.1" },
{ name = "litellm", specifier = "==1.81.6" },
{ name = "lxml", marker = "extra == 'backend'", specifier = "==5.3.0" },
{ name = "mako", marker = "extra == 'backend'", specifier = "==1.2.4" },
{ name = "manygo", marker = "extra == 'dev'", specifier = "==0.2.0" },
{ name = "markitdown", extras = ["pdf", "docx", "pptx", "xlsx", "xls"], marker = "extra == 'backend'", specifier = "==0.1.2" },
{ name = "matplotlib", marker = "extra == 'dev'", specifier = "==3.10.8" },
{ name = "mcp", extras = ["cli"], marker = "extra == 'backend'", specifier = "==1.26.0" },
{ name = "mistune", marker = "extra == 'backend'", specifier = "==3.2.0" },
{ name = "msal", marker = "extra == 'backend'", specifier = "==1.34.0" },
{ name = "msoffcrypto-tool", marker = "extra == 'backend'", specifier = "==5.4.2" },
{ name = "mypy", marker = "extra == 'dev'", specifier = "==1.13.0" },
{ name = "mypy-extensions", marker = "extra == 'dev'", specifier = "==1.0.0" },
{ name = "nest-asyncio", marker = "extra == 'backend'", specifier = "==1.6.0" },
{ name = "numpy", marker = "extra == 'model-server'", specifier = "==2.4.1" },
{ name = "oauthlib", marker = "extra == 'backend'", specifier = "==3.2.2" },
{ name = "office365-rest-python-client", marker = "extra == 'backend'", specifier = "==2.6.2" },
{ name = "onyx-devtools", marker = "extra == 'dev'", specifier = "==0.7.2" },
{ name = "openai", specifier = "==2.14.0" },
{ name = "openapi-generator-cli", marker = "extra == 'dev'", specifier = "==7.17.0" },
{ name = "openinference-instrumentation", marker = "extra == 'backend'", specifier = "==0.1.42" },
{ name = "openpyxl", marker = "extra == 'backend'", specifier = "==3.0.10" },
{ name = "opensearch-py", marker = "extra == 'backend'", specifier = "==3.0.0" },
{ name = "opentelemetry-proto", marker = "extra == 'backend'", specifier = ">=1.39.0" },
{ name = "pandas-stubs", marker = "extra == 'dev'", specifier = "~=2.3.3" },
{ name = "passlib", marker = "extra == 'backend'", specifier = "==1.7.4" },
{ name = "playwright", marker = "extra == 'backend'", specifier = "==1.55.0" },
{ name = "posthog", marker = "extra == 'ee'", specifier = "==3.7.4" },
{ name = "pre-commit", marker = "extra == 'dev'", specifier = "==3.2.2" },
{ name = "prometheus-client", specifier = ">=0.21.1" },
{ name = "prometheus-fastapi-instrumentator", specifier = "==7.1.0" },
{ name = "psutil", marker = "extra == 'backend'", specifier = "==7.1.3" },
{ name = "psycopg2-binary", marker = "extra == 'backend'", specifier = "==2.9.9" },
{ name = "puremagic", marker = "extra == 'backend'", specifier = "==1.28" },
{ name = "pyairtable", marker = "extra == 'backend'", specifier = "==3.0.1" },
{ name = "pycryptodome", marker = "extra == 'backend'", specifier = "==3.19.1" },
{ name = "pydantic", specifier = "==2.11.7" },
{ name = "pygithub", marker = "extra == 'backend'", specifier = "==2.5.0" },
{ name = "pympler", marker = "extra == 'backend'", specifier = "==1.1" },
{ name = "pypandoc-binary", marker = "extra == 'backend'", specifier = "==1.16.2" },
{ name = "pypdf", marker = "extra == 'backend'", specifier = "==6.9.2" },
{ name = "pytest", marker = "extra == 'dev'", specifier = "==8.3.5" },
{ name = "pytest-alembic", marker = "extra == 'dev'", specifier = "==0.12.1" },
{ name = "pytest-asyncio", marker = "extra == 'dev'", specifier = "==1.3.0" },
{ name = "pytest-dotenv", marker = "extra == 'dev'", specifier = "==0.5.2" },
{ name = "pytest-mock", marker = "extra == 'backend'", specifier = "==3.12.0" },
{ name = "pytest-playwright", marker = "extra == 'backend'", specifier = "==0.7.0" },
{ name = "pytest-repeat", marker = "extra == 'dev'", specifier = "==0.9.4" },
{ name = "pytest-xdist", marker = "extra == 'dev'", specifier = "==3.8.0" },
{ name = "python-dateutil", marker = "extra == 'backend'", specifier = "==2.8.2" },
{ name = "python-docx", marker = "extra == 'backend'", specifier = "==1.1.2" },
{ name = "python-dotenv", marker = "extra == 'backend'", specifier = "==1.1.1" },
{ name = "python-gitlab", marker = "extra == 'backend'", specifier = "==5.6.0" },
{ name = "python-multipart", marker = "extra == 'backend'", specifier = "==0.0.22" },
{ name = "python-pptx", marker = "extra == 'backend'", specifier = "==0.6.23" },
{ name = "python3-saml", marker = "extra == 'backend'", specifier = "==1.15.0" },
{ name = "pywikibot", marker = "extra == 'backend'", specifier = "==9.0.0" },
{ name = "rapidfuzz", marker = "extra == 'backend'", specifier = "==3.13.0" },
{ name = "redis", marker = "extra == 'backend'", specifier = "==5.0.8" },
{ name = "release-tag", marker = "extra == 'dev'", specifier = "==0.5.2" },
{ name = "reorder-python-imports-black", marker = "extra == 'dev'", specifier = "==3.14.0" },
{ name = "requests", marker = "extra == 'backend'", specifier = "==2.33.0" },
{ name = "requests-oauthlib", marker = "extra == 'backend'", specifier = "==1.3.1" },
{ name = "retry", specifier = "==0.9.2" },
{ name = "rfc3986", marker = "extra == 'backend'", specifier = "==1.5.0" },
{ name = "ruff", marker = "extra == 'dev'", specifier = "==0.12.0" },
{ name = "safetensors", marker = "extra == 'model-server'", specifier = "==0.5.3" },
{ name = "sendgrid", marker = "extra == 'backend'", specifier = "==6.12.5" },
{ name = "sentence-transformers", marker = "extra == 'model-server'", specifier = "==4.0.2" },
{ name = "sentry-sdk", specifier = "==2.14.0" },
{ name = "sentry-sdk", extras = ["fastapi", "celery", "starlette"], marker = "extra == 'model-server'", specifier = "==2.14.0" },
{ name = "shapely", marker = "extra == 'backend'", specifier = "==2.0.6" },
{ name = "simple-salesforce", marker = "extra == 'backend'", specifier = "==1.12.6" },
{ name = "slack-sdk", marker = "extra == 'backend'", specifier = "==3.20.2" },
{ name = "sqlalchemy", extras = ["mypy"], marker = "extra == 'backend'", specifier = "==2.0.15" },
{ name = "starlette", marker = "extra == 'backend'", specifier = "==0.49.3" },
{ name = "stripe", marker = "extra == 'backend'", specifier = "==10.12.0" },
{ name = "supervisor", marker = "extra == 'backend'", specifier = "==4.3.0" },
{ name = "tiktoken", marker = "extra == 'backend'", specifier = "==0.7.0" },
{ name = "timeago", marker = "extra == 'backend'", specifier = "==1.0.16" },
{ name = "torch", marker = "extra == 'model-server'", specifier = "==2.9.1" },
{ name = "trafilatura", marker = "extra == 'backend'", specifier = "==1.12.2" },
{ name = "transformers", marker = "extra == 'model-server'", specifier = "==4.53.0" },
{ name = "types-beautifulsoup4", marker = "extra == 'dev'", specifier = "==4.12.0.3" },
{ name = "types-html5lib", marker = "extra == 'dev'", specifier = "==1.1.11.13" },
{ name = "types-oauthlib", marker = "extra == 'dev'", specifier = "==3.2.0.9" },
{ name = "types-openpyxl", marker = "extra == 'backend'", specifier = "==3.0.4.7" },
{ name = "types-passlib", marker = "extra == 'dev'", specifier = "==1.7.7.20240106" },
{ name = "types-pillow", marker = "extra == 'dev'", specifier = "==10.2.0.20240822" },
{ name = "types-psutil", marker = "extra == 'dev'", specifier = "==7.1.3.20251125" },
{ name = "types-psycopg2", marker = "extra == 'dev'", specifier = "==2.9.21.10" },
{ name = "types-python-dateutil", marker = "extra == 'dev'", specifier = "==2.8.19.13" },
{ name = "types-pytz", marker = "extra == 'dev'", specifier = "==2023.3.1.1" },
{ name = "types-pyyaml", marker = "extra == 'dev'", specifier = "==6.0.12.11" },
{ name = "types-regex", marker = "extra == 'dev'", specifier = "==2023.3.23.1" },
{ name = "types-requests", marker = "extra == 'dev'", specifier = "==2.32.0.20250328" },
{ name = "types-retry", marker = "extra == 'dev'", specifier = "==0.9.9.3" },
{ name = "types-setuptools", marker = "extra == 'dev'", specifier = "==68.0.0.3" },
{ name = "unstructured", marker = "extra == 'backend'", specifier = "==0.18.27" },
{ name = "unstructured-client", marker = "extra == 'backend'", specifier = "==0.42.6" },
{ name = "urllib3", marker = "extra == 'backend'", specifier = "==2.6.3" },
{ name = "uvicorn", specifier = "==0.35.0" },
{ name = "voyageai", specifier = "==0.2.3" },
{ name = "xmlsec", marker = "extra == 'backend'", specifier = "==1.3.14" },
{ name = "zizmor", marker = "extra == 'dev'", specifier = "==1.18.0" },
{ name = "zulip", marker = "extra == 'backend'", specifier = "==0.8.2" },
]
provides-extras = ["backend", "dev", "ee", "model-server"]
[[package]]
name = "onyx-backend"
version = "0.0.0"
source = { virtual = "backend" }
dependencies = [
{ name = "onyx", extra = ["backend", "dev", "ee"] },
]
[package.metadata]
requires-dist = [{ name = "onyx", extras = ["backend", "dev", "ee"], editable = "." }]
[package.metadata.requires-dev]
backend = [
{ name = "aiohttp", specifier = "==3.13.4" },
{ name = "alembic", specifier = "==1.10.4" },
{ name = "asana", specifier = "==5.0.8" },
{ name = "asyncpg", specifier = "==0.30.0" },
{ name = "atlassian-python-api", specifier = "==3.41.16" },
{ name = "azure-cognitiveservices-speech", specifier = "==1.38.0" },
{ name = "beautifulsoup4", specifier = "==4.12.3" },
{ name = "boto3", specifier = "==1.39.11" },
{ name = "boto3-stubs", extras = ["s3"], specifier = "==1.39.11" },
{ name = "braintrust", specifier = "==0.3.9" },
{ name = "celery", specifier = "==5.5.1" },
{ name = "chardet", specifier = "==5.2.0" },
{ name = "chonkie", specifier = "==1.0.10" },
{ name = "dask", specifier = "==2026.1.1" },
{ name = "ddtrace", specifier = "==3.10.0" },
{ name = "discord-py", specifier = "==2.4.0" },
{ name = "distributed", specifier = "==2026.1.1" },
{ name = "dropbox", specifier = "==12.0.2" },
{ name = "exa-py", specifier = "==1.15.4" },
{ name = "fastapi-limiter", specifier = "==0.1.6" },
{ name = "fastapi-users", specifier = "==15.0.4" },
{ name = "fastapi-users-db-sqlalchemy", specifier = "==7.0.0" },
{ name = "fastmcp", specifier = "==3.2.0" },
{ name = "filelock", specifier = "==3.20.3" },
{ name = "google-api-python-client", specifier = "==2.86.0" },
{ name = "google-auth-httplib2", specifier = "==0.1.0" },
{ name = "google-auth-oauthlib", specifier = "==1.0.0" },
{ name = "httpcore", specifier = "==1.0.9" },
{ name = "httpx", extras = ["http2"], specifier = "==0.28.1" },
{ name = "httpx-oauth", specifier = "==0.15.1" },
{ name = "hubspot-api-client", specifier = "==11.1.0" },
{ name = "huggingface-hub", specifier = "==0.35.3" },
{ name = "inflection", specifier = "==0.5.1" },
{ name = "jira", specifier = "==3.10.5" },
{ name = "jsonref", specifier = "==1.1.0" },
{ name = "kubernetes", specifier = "==31.0.0" },
{ name = "langchain-core", specifier = "==1.2.22" },
{ name = "langfuse", specifier = "==3.10.0" },
{ name = "lazy-imports", specifier = "==1.0.1" },
{ name = "lxml", specifier = "==5.3.0" },
{ name = "mako", specifier = "==1.2.4" },
{ name = "markitdown", extras = ["pdf", "docx", "pptx", "xlsx", "xls"], specifier = "==0.1.2" },
{ name = "mcp", extras = ["cli"], specifier = "==1.26.0" },
{ name = "mistune", specifier = "==3.2.0" },
{ name = "msal", specifier = "==1.34.0" },
{ name = "msoffcrypto-tool", specifier = "==5.4.2" },
{ name = "nest-asyncio", specifier = "==1.6.0" },
{ name = "oauthlib", specifier = "==3.2.2" },
{ name = "office365-rest-python-client", specifier = "==2.6.2" },
{ name = "openinference-instrumentation", specifier = "==0.1.42" },
{ name = "openpyxl", specifier = "==3.0.10" },
{ name = "opensearch-py", specifier = "==3.0.0" },
{ name = "opentelemetry-proto", specifier = ">=1.39.0" },
{ name = "passlib", specifier = "==1.7.4" },
{ name = "playwright", specifier = "==1.55.0" },
{ name = "psutil", specifier = "==7.1.3" },
{ name = "psycopg2-binary", specifier = "==2.9.9" },
{ name = "puremagic", specifier = "==1.28" },
{ name = "pyairtable", specifier = "==3.0.1" },
{ name = "pycryptodome", specifier = "==3.19.1" },
{ name = "pygithub", specifier = "==2.5.0" },
{ name = "pympler", specifier = "==1.1" },
{ name = "pypandoc-binary", specifier = "==1.16.2" },
{ name = "pypdf", specifier = "==6.9.2" },
{ name = "pytest-mock", specifier = "==3.12.0" },
{ name = "pytest-playwright", specifier = "==0.7.0" },
{ name = "python-dateutil", specifier = "==2.8.2" },
{ name = "python-docx", specifier = "==1.1.2" },
{ name = "python-dotenv", specifier = "==1.1.1" },
{ name = "python-gitlab", specifier = "==5.6.0" },
{ name = "python-multipart", specifier = "==0.0.22" },
{ name = "python-pptx", specifier = "==0.6.23" },
{ name = "python3-saml", specifier = "==1.15.0" },
{ name = "pywikibot", specifier = "==9.0.0" },
{ name = "rapidfuzz", specifier = "==3.13.0" },
{ name = "redis", specifier = "==5.0.8" },
{ name = "requests", specifier = "==2.33.0" },
{ name = "requests-oauthlib", specifier = "==1.3.1" },
{ name = "rfc3986", specifier = "==1.5.0" },
{ name = "sendgrid", specifier = "==6.12.5" },
{ name = "shapely", specifier = "==2.0.6" },
{ name = "simple-salesforce", specifier = "==1.12.6" },
{ name = "slack-sdk", specifier = "==3.20.2" },
{ name = "sqlalchemy", extras = ["mypy"], specifier = "==2.0.15" },
{ name = "starlette", specifier = "==0.49.3" },
{ name = "stripe", specifier = "==10.12.0" },
{ name = "supervisor", specifier = "==4.3.0" },
{ name = "tiktoken", specifier = "==0.7.0" },
{ name = "timeago", specifier = "==1.0.16" },
{ name = "trafilatura", specifier = "==1.12.2" },
{ name = "types-openpyxl", specifier = "==3.0.4.7" },
{ name = "unstructured", specifier = "==0.18.27" },
{ name = "unstructured-client", specifier = "==0.42.6" },
{ name = "urllib3", specifier = "==2.6.3" },
{ name = "xmlsec", specifier = "==1.3.14" },
{ name = "zulip", specifier = "==0.8.2" },
]
dev = [
{ name = "black", specifier = "==25.1.0" },
{ name = "celery-types", specifier = "==0.19.0" },
{ name = "faker", specifier = "==40.1.2" },
{ name = "hatchling", specifier = "==1.28.0" },
{ name = "ipykernel", specifier = "==6.29.5" },
{ name = "manygo", specifier = "==0.2.0" },
{ name = "matplotlib", specifier = "==3.10.8" },
{ name = "mypy", specifier = "==1.13.0" },
{ name = "mypy-extensions", specifier = "==1.0.0" },
{ name = "onyx-devtools", specifier = "==0.7.4" },
{ name = "openapi-generator-cli", specifier = "==7.17.0" },
{ name = "pandas-stubs", specifier = "~=2.3.3" },
{ name = "pre-commit", specifier = "==3.2.2" },
{ name = "pytest", specifier = "==8.3.5" },
{ name = "pytest-alembic", specifier = "==0.12.1" },
{ name = "pytest-asyncio", specifier = "==1.3.0" },
{ name = "pytest-dotenv", specifier = "==0.5.2" },
{ name = "pytest-repeat", specifier = "==0.9.4" },
{ name = "pytest-xdist", specifier = "==3.8.0" },
{ name = "release-tag", specifier = "==0.5.2" },
{ name = "reorder-python-imports-black", specifier = "==3.14.0" },
{ name = "ruff", specifier = "==0.12.0" },
{ name = "types-beautifulsoup4", specifier = "==4.12.0.3" },
{ name = "types-html5lib", specifier = "==1.1.11.13" },
{ name = "types-oauthlib", specifier = "==3.2.0.9" },
{ name = "types-passlib", specifier = "==1.7.7.20240106" },
{ name = "types-pillow", specifier = "==10.2.0.20240822" },
{ name = "types-psutil", specifier = "==7.1.3.20251125" },
{ name = "types-psycopg2", specifier = "==2.9.21.10" },
{ name = "types-python-dateutil", specifier = "==2.8.19.13" },
{ name = "types-pytz", specifier = "==2023.3.1.1" },
{ name = "types-pyyaml", specifier = "==6.0.12.11" },
{ name = "types-regex", specifier = "==2023.3.23.1" },
{ name = "types-requests", specifier = "==2.32.0.20250328" },
{ name = "types-retry", specifier = "==0.9.9.3" },
{ name = "types-setuptools", specifier = "==68.0.0.3" },
{ name = "zizmor", specifier = "==1.18.0" },
]
ee = [{ name = "posthog", specifier = "==3.7.4" }]
model-server = [
{ name = "accelerate", specifier = "==1.6.0" },
{ name = "einops", specifier = "==0.8.1" },
{ name = "numpy", specifier = "==2.4.1" },
{ name = "safetensors", specifier = "==0.5.3" },
{ name = "sentence-transformers", specifier = "==4.0.2" },
{ name = "sentry-sdk", extras = ["fastapi", "celery", "starlette"], specifier = "==2.14.0" },
{ name = "torch", specifier = "==2.9.1" },
{ name = "transformers", specifier = "==4.53.0" },
]
[[package]]
name = "onyx-devtools"
version = "0.7.2"
version = "0.7.4"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "fastapi" },
{ name = "openapi-generator-cli" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/22/b0/765ed49157470e8ccc8ab89e6a896ade50cde3aa2a494662ad4db92a48c4/onyx_devtools-0.7.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:553a2b5e61b29b7913c991c8d5aed78f930f0f81a0f42229c6a8de2b1e8ff57e", size = 4203859, upload-time = "2026-03-27T15:09:49.63Z" },
{ url = "https://files.pythonhosted.org/packages/f7/9d/bba0a44a16d2fc27e5441aaf10727e10514e7a49bce70eca02bced566eb9/onyx_devtools-0.7.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5cf0782dca8b3d861de9e18e65e990cfce5161cd559df44d8fabd3fefd54fdcd", size = 3879750, upload-time = "2026-03-27T15:09:42.413Z" },
{ url = "https://files.pythonhosted.org/packages/4d/d8/c5725e8af14c74fe0aeed29e4746400bb3c0a078fd1240df729dc6432b84/onyx_devtools-0.7.2-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:9a0d67373e16b4fbb38a5290c0d9dfd4cfa837e5da0c165b32841b9d37f7455b", size = 3743529, upload-time = "2026-03-27T15:09:44.546Z" },
{ url = "https://files.pythonhosted.org/packages/1a/82/b7c398a21dbc3e14fd7a29e49caa86b1bc0f8d7c75c051514785441ab779/onyx_devtools-0.7.2-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:794af14b2de575d0ae41b94551399eca8f8ba9b950c5db7acb7612767fd228f9", size = 4166562, upload-time = "2026-03-27T15:09:49.471Z" },
{ url = "https://files.pythonhosted.org/packages/26/76/be129e2baafc91fe792d919b1f4d73fc943ba9c2b728a60f1fb98e0c115a/onyx_devtools-0.7.2-py3-none-win_amd64.whl", hash = "sha256:83b3eb84df58d865e4f714222a5fab3ea464836e2c8690569454a940bbb651ff", size = 4282270, upload-time = "2026-03-27T15:09:44.676Z" },
{ url = "https://files.pythonhosted.org/packages/3b/72/29b8c8dbcf069c56475f00511f04c4aaa5ba3faba1dfc8276107d4b3ef7f/onyx_devtools-0.7.2-py3-none-win_arm64.whl", hash = "sha256:62f0836624ee6a5b31e64fd93162e7fce142ac8a4f959607e411824bc2b88174", size = 3823053, upload-time = "2026-03-27T15:09:43.546Z" },
{ url = "https://files.pythonhosted.org/packages/cc/3f/584bb003333b6e6d632b06bbf99d410c7a71adde1711076fd44fe88d966d/onyx_devtools-0.7.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6c51d9199ff8ff8fe64a3cfcf77f8170508722b33a1de54c5474be0447b7afa8", size = 4237700, upload-time = "2026-04-09T21:28:20.694Z" },
{ url = "https://files.pythonhosted.org/packages/0a/04/8c28522d51a66b1bdc997a1c72821122eab23f048459646c6ee62a39f6eb/onyx_devtools-0.7.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f64a4cec6d3616b9ca7354e326994882c9ff2cb3f9fc9a44e55f0eb6a6ff1c1c", size = 3912751, upload-time = "2026-04-09T21:28:23.079Z" },
{ url = "https://files.pythonhosted.org/packages/8c/e6/ae60307cc50064dacb58e003c9a367d5c85118fd89a597abf3de5fd66f0a/onyx_devtools-0.7.4-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:31c7cecaaa329e3f6d53864290bc53fd0b823453c6cfdb8be7931a8925f5c075", size = 3778188, upload-time = "2026-04-09T21:28:23.14Z" },
{ url = "https://files.pythonhosted.org/packages/f1/d1/5a2789efac7d8f19d30d4d8da1862dd10a16b65d8c9b200542a959094a17/onyx_devtools-0.7.4-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:4c44e3c21253ea92127af483155190c14426c729d93e244aedc33875f74d3514", size = 4200526, upload-time = "2026-04-09T21:28:23.711Z" },
{ url = "https://files.pythonhosted.org/packages/0a/40/56a467eaa7b78411971898191cf0dc3ee49b7f448d1cfe76cd432f6458d3/onyx_devtools-0.7.4-py3-none-win_amd64.whl", hash = "sha256:6fa2b63b702bc5ecbeed5f9eadec57d61ac5c4a646cf5fbd66ee340f53b7d81c", size = 4319090, upload-time = "2026-04-09T21:28:23.26Z" },
{ url = "https://files.pythonhosted.org/packages/fa/ef/c866fa8ce1f75e1ac67bc239e767b8944cb1a12a44950986ce57e06db17f/onyx_devtools-0.7.4-py3-none-win_arm64.whl", hash = "sha256:c84cbe6a85474dc9f005f079796cf031e80c4249897432ad9f370cd27f72970a", size = 3857229, upload-time = "2026-04-09T21:28:23.484Z" },
]
[[package]]

View File

@@ -68,9 +68,7 @@ SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")"
# Run the conversion into a temp file so a failed run doesn't destroy an existing .tsx
TMPFILE="${BASE_NAME}.tsx.tmp"
bunx @svgr/cli "$SVG_FILE" --typescript --svgo-config "$SVGO_CONFIG" --template "${SCRIPT_DIR}/icon-template.js" > "$TMPFILE"
if [ $? -eq 0 ]; then
if bunx @svgr/cli "$SVG_FILE" --typescript --svgo-config "$SVGO_CONFIG" --template "${SCRIPT_DIR}/icon-template.js" > "$TMPFILE"; then
# Verify the temp file has content before replacing the destination
if [ ! -s "$TMPFILE" ]; then
rm -f "$TMPFILE"
@@ -84,16 +82,14 @@ if [ $? -eq 0 ]; then
# Using perl for cross-platform compatibility (works on macOS, Linux, Windows with WSL)
# Note: perl -i returns 0 even on some failures, so we validate the output
perl -i -pe 's/<svg/<svg width={size} height={size}/g' "${BASE_NAME}.tsx"
if [ $? -ne 0 ]; then
if ! perl -i -pe 's/<svg/<svg width={size} height={size}/g' "${BASE_NAME}.tsx"; then
echo "Error: Failed to add width/height attributes" >&2
exit 1
fi
# Icons additionally get stroke="currentColor"
if [ "$MODE" = "icon" ]; then
perl -i -pe 's/\{\.\.\.props\}/stroke="currentColor" {...props}/g' "${BASE_NAME}.tsx"
if [ $? -ne 0 ]; then
if ! perl -i -pe 's/\{\.\.\.props\}/stroke="currentColor" {...props}/g' "${BASE_NAME}.tsx"; then
echo "Error: Failed to add stroke attribute" >&2
exit 1
fi

View File

@@ -171,6 +171,7 @@ export { default as SvgTrash } from "@opal/icons/trash";
export { default as SvgTwoLineSmall } from "@opal/icons/two-line-small";
export { default as SvgUnplug } from "@opal/icons/unplug";
export { default as SvgUploadCloud } from "@opal/icons/upload-cloud";
export { default as SvgUploadSquare } from "@opal/icons/upload-square";
export { default as SvgUser } from "@opal/icons/user";
export { default as SvgUserCheck } from "@opal/icons/user-check";
export { default as SvgUserEdit } from "@opal/icons/user-edit";

View File

@@ -0,0 +1,22 @@
import type { IconProps } from "@opal/types";
// Upload-square icon (16x16 viewBox): an upward arrow rising out of a square
// outline. `size` sets both width and height; all remaining props spread onto
// the root <svg>, so callers can override the default stroke="currentColor".
// NOTE(review): generated by @svgr/cli via scripts/icon-template.js — prefer
// regenerating from the source SVG over hand-editing path data.
const SvgUploadSquare = ({ size, ...props }: IconProps) => (
<svg
width={size}
height={size}
viewBox="0 0 16 16"
fill="none"
xmlns="http://www.w3.org/2000/svg"
stroke="currentColor"
{...props}
>
<path
d="M11 14H12.6667C13.3929 14 14 13.3929 14 12.6667V3.33333C14 2.60711 13.3929 2 12.6667 2H3.33333C2.60711 2 2 2.60711 2 3.33333V12.6667C2 13.3929 2.60711 14 3.33333 14H5M10.6666 8.16667L7.99998 5.5M7.99998 5.5L5.33331 8.16667M7.99998 5.5V14"
strokeWidth={1.5}
strokeLinecap="round"
strokeLinejoin="round"
/>
</svg>
);
export default SvgUploadSquare;

View File

@@ -17,6 +17,7 @@ import DocumentSetCard from "@/sections/cards/DocumentSetCard";
import CollapsibleSection from "@/app/admin/agents/CollapsibleSection";
import { StandardAnswerCategoryResponse } from "@/components/standardAnswers/getStandardAnswerCategoriesIfEE";
import { StandardAnswerCategoryDropdownField } from "@/components/standardAnswers/StandardAnswerCategoryDropdown";
import InputComboBox from "@/refresh-components/inputs/InputComboBox";
import { RadioGroup } from "@/components/ui/radio-group";
import { RadioGroupItemField } from "@/components/ui/RadioGroupItemField";
import { AlertCircle } from "lucide-react";
@@ -126,6 +127,24 @@ export function SlackChannelConfigFormFields({
return documentSets.filter((ds) => !documentSetContainsSync(ds));
}, [documentSets]);
const searchAgentOptions = useMemo(
() =>
availableAgents.map((persona) => ({
label: persona.name,
value: String(persona.id),
})),
[availableAgents]
);
const nonSearchAgentOptions = useMemo(
() =>
nonSearchAgents.map((persona) => ({
label: persona.name,
value: String(persona.id),
})),
[nonSearchAgents]
);
useEffect(() => {
const invalidSelected = values.document_sets.filter((dsId: number) =>
unselectableSets.some((us) => us.id === dsId)
@@ -355,12 +374,14 @@ export function SlackChannelConfigFormFields({
</>
</SubLabel>
<SelectorFormField
name="persona_id"
options={availableAgents.map((persona) => ({
name: persona.name,
value: persona.id,
}))}
<InputComboBox
placeholder="Search for an agent..."
value={String(values.persona_id ?? "")}
onValueChange={(val) =>
setFieldValue("persona_id", val ? Number(val) : null)
}
options={searchAgentOptions}
strict
/>
{viewSyncEnabledAgents && syncEnabledAgents.length > 0 && (
<div className="mt-4">
@@ -419,12 +440,14 @@ export function SlackChannelConfigFormFields({
</>
</SubLabel>
<SelectorFormField
name="persona_id"
options={nonSearchAgents.map((persona) => ({
name: persona.name,
value: persona.id,
}))}
<InputComboBox
placeholder="Search for an agent..."
value={String(values.persona_id ?? "")}
onValueChange={(val) =>
setFieldValue("persona_id", val ? Number(val) : null)
}
options={nonSearchAgentOptions}
strict
/>
</div>
)}

View File

@@ -1,5 +1,5 @@
import { defaultTailwindCSS } from "@/components/icons/icons";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getModelIcon } from "@/lib/llmConfig";
import { IconProps } from "@opal/types";
export interface ModelIconProps extends IconProps {

View File

@@ -1 +1 @@
export { default } from "@/refresh-pages/admin/LLMProviderConfigurationPage";
export { default } from "@/refresh-pages/admin/LLMConfigurationPage";

View File

@@ -5,7 +5,7 @@ import { Button } from "@opal/components";
import { Text } from "@opal/components";
import { ContentAction } from "@opal/layouts";
import { SvgEyeOff, SvgX } from "@opal/icons";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getModelIcon } from "@/lib/llmConfig";
import AgentMessage, {
AgentMessageProps,
} from "@/app/app/message/messageComponents/AgentMessage";
@@ -28,6 +28,8 @@ export interface MultiModelPanelProps {
isNonPreferredInSelection: boolean;
/** Callback when user clicks this panel to select as preferred */
onSelect: () => void;
/** Callback to deselect this panel as preferred */
onDeselect?: () => void;
/** Callback to hide/show this panel */
onToggleVisibility: () => void;
/** Props to pass through to AgentMessage */
@@ -63,6 +65,7 @@ export default function MultiModelPanel({
isHidden,
isNonPreferredInSelection,
onSelect,
onDeselect,
onToggleVisibility,
agentMessageProps,
errorMessage,
@@ -93,11 +96,25 @@ export default function MultiModelPanel({
rightChildren={
<div className="flex items-center gap-1 px-2">
{isPreferred && (
<span className="text-action-link-05 shrink-0">
<Text font="secondary-body" color="inherit" nowrap>
Preferred Response
</Text>
</span>
<>
<span className="text-action-link-05 shrink-0">
<Text font="secondary-body" color="inherit" nowrap>
Preferred Response
</Text>
</span>
{onDeselect && (
<Button
prominence="tertiary"
icon={SvgX}
size="sm"
onClick={(e) => {
e.stopPropagation();
onDeselect();
}}
tooltip="Deselect preferred response"
/>
)}
</>
)}
{!isPreferred && (
<Button

View File

@@ -30,7 +30,7 @@ const SELECTION_PANEL_W = 400;
// Compact width for hidden panels in the carousel track
const HIDDEN_PANEL_W = 220;
// Generation-mode panel widths (from Figma)
const GEN_PANEL_W_2 = 640; // 2 panels side-by-side
const GEN_PANEL_W_2 = 720; // 2 panels side-by-side
const GEN_PANEL_W_3 = 436; // 3 panels side-by-side
// Gap between panels — matches CSS gap-6 (24px)
const PANEL_GAP = 24;
@@ -64,14 +64,31 @@ export default function MultiModelResponseView({
onMessageSelection,
onHiddenPanelsChange,
}: MultiModelResponseViewProps) {
const [preferredIndex, setPreferredIndex] = useState<number | null>(null);
// Initialize preferredIndex from the backend's preferred_response_id when
// loading an existing conversation.
const [preferredIndex, setPreferredIndex] = useState<number | null>(() => {
if (!parentMessage?.preferredResponseId) return null;
const match = responses.find(
(r) => r.messageId === parentMessage.preferredResponseId
);
return match?.modelIndex ?? null;
});
const [hiddenPanels, setHiddenPanels] = useState<Set<number>>(new Set());
// Controls animation: false = panels at start position, true = panels at peek position
const [selectionEntered, setSelectionEntered] = useState(false);
const [selectionEntered, setSelectionEntered] = useState(
() => preferredIndex !== null
);
// Tracks the deselect animation timeout so it can be cancelled if the user
// re-selects a panel during the 450ms animation window.
const deselectTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>(null);
// True while the reverse animation is playing (deselect → back to equal panels)
const [selectionExiting, setSelectionExiting] = useState(false);
// Measures the overflow-hidden carousel container for responsive preferred-panel sizing.
const [trackContainerW, setTrackContainerW] = useState(0);
const roRef = useRef<ResizeObserver | null>(null);
const trackContainerElRef = useRef<HTMLDivElement | null>(null);
const trackContainerRef = useCallback((el: HTMLDivElement | null) => {
trackContainerElRef.current = el;
if (roRef.current) {
roRef.current.disconnect();
roRef.current = null;
@@ -90,6 +107,9 @@ export default function MultiModelResponseView({
number | null
>(null);
const preferredRoRef = useRef<ResizeObserver | null>(null);
// Refs to each panel wrapper for height animation on deselect
const panelElsRef = useRef<Map<number, HTMLDivElement>>(new Map());
// Tracks which non-preferred panels overflow the preferred height cap
const [overflowingPanels, setOverflowingPanels] = useState<Set<number>>(
new Set()
@@ -152,12 +172,43 @@ export default function MultiModelResponseView({
const handleSelectPreferred = useCallback(
(modelIndex: number) => {
if (isGenerating) return;
// Cancel any pending deselect animation so it doesn't overwrite this selection
if (deselectTimeoutRef.current !== null) {
clearTimeout(deselectTimeoutRef.current);
deselectTimeoutRef.current = null;
setSelectionExiting(false);
}
// Only freeze scroll when entering selection mode for the first time.
// When switching preferred within selection mode, panels are already
// capped and the track just slides — no height changes to worry about.
const alreadyInSelection = preferredIndex !== null;
if (!alreadyInSelection) {
const scrollContainer = trackContainerElRef.current?.closest(
"[data-chat-scroll]"
) as HTMLElement | null;
const scrollTop = scrollContainer?.scrollTop ?? 0;
if (scrollContainer) scrollContainer.style.overflow = "hidden";
setTimeout(() => {
if (scrollContainer) {
scrollContainer.scrollTop = scrollTop;
requestAnimationFrame(() => {
requestAnimationFrame(() => {
if (scrollContainer) {
scrollContainer.scrollTop = scrollTop;
scrollContainer.style.overflow = "";
}
});
});
}
}, 450);
}
setPreferredIndex(modelIndex);
const response = responses.find((r) => r.modelIndex === modelIndex);
if (!response) return;
if (onMessageSelection) {
onMessageSelection(response.nodeId);
}
// Persist preferred response to backend + update local tree so the
// input bar unblocks (awaitingPreferredSelection clears).
@@ -185,17 +236,111 @@ export default function MultiModelResponseView({
[
isGenerating,
responses,
onMessageSelection,
preferredIndex,
parentMessage,
currentSessionId,
updateSessionMessageTree,
]
);
// NOTE: Deselect only clears the local tree — no backend call to clear
// preferred_response_id. The SetPreferredResponseRequest model doesn't
// accept null. A backend endpoint for clearing preference would be needed
// if deselect should persist across reloads.
const handleDeselectPreferred = useCallback(() => {
const scrollContainer = trackContainerElRef.current?.closest(
"[data-chat-scroll]"
) as HTMLElement | null;
// Animate panels back to equal positions, then clear preferred after transition
setSelectionExiting(true);
setSelectionEntered(false);
deselectTimeoutRef.current = setTimeout(() => {
deselectTimeoutRef.current = null;
const scrollTop = scrollContainer?.scrollTop ?? 0;
if (scrollContainer) scrollContainer.style.overflow = "hidden";
// Before clearing state, animate each capped panel's height from
// its current clientHeight to its natural scrollHeight.
const animations: Animation[] = [];
panelElsRef.current.forEach((el, modelIndex) => {
if (modelIndex === preferredIndex) return;
if (hiddenPanels.has(modelIndex)) return;
const from = el.clientHeight;
const to = el.scrollHeight;
if (to <= from) return;
// Lock current height, remove maxHeight cap, then animate
el.style.maxHeight = `${from}px`;
el.style.overflow = "hidden";
const anim = el.animate(
[{ maxHeight: `${from}px` }, { maxHeight: `${to}px` }],
{
duration: 350,
easing: "cubic-bezier(0.2, 0, 0, 1)",
fill: "forwards",
}
);
animations.push(anim);
anim.onfinish = () => {
el.style.maxHeight = "";
el.style.overflow = "";
};
});
setSelectionExiting(false);
setPreferredIndex(null);
// Restore scroll after animations + React settle
const restoreScroll = () => {
requestAnimationFrame(() => {
if (scrollContainer) {
scrollContainer.scrollTop = scrollTop;
scrollContainer.style.overflow = "";
}
});
};
if (animations.length > 0) {
Promise.all(animations.map((a) => a.finished))
.then(restoreScroll)
.catch(restoreScroll);
} else {
restoreScroll();
}
// Clear preferredResponseId in the local tree so input bar re-gates
if (parentMessage && currentSessionId) {
const tree = useChatSessionStore
.getState()
.sessions.get(currentSessionId)?.messageTree;
if (tree) {
const userMsg = tree.get(parentMessage.nodeId);
if (userMsg) {
const updated = new Map(tree);
updated.set(parentMessage.nodeId, {
...userMsg,
preferredResponseId: undefined,
});
updateSessionMessageTree(currentSessionId, updated);
}
}
}
}, 450);
}, [
parentMessage,
currentSessionId,
updateSessionMessageTree,
preferredIndex,
hiddenPanels,
]);
// Clear preferred selection when generation starts
// Reset selection state when generation restarts
useEffect(() => {
if (isGenerating) {
setPreferredIndex(null);
setHasEnteredSelection(false);
setSelectionExiting(false);
}
}, [isGenerating]);
@@ -204,22 +349,39 @@ export default function MultiModelResponseView({
(r) => r.modelIndex === preferredIndex
);
// Selection mode when preferred is set, found in responses, not generating, and at least 2 visible panels
const showSelectionMode =
// Track whether selection mode was ever entered — once it has been,
// we stay in the selection layout (even after deselect) to avoid a
// jarring DOM swap between the two layout strategies.
const [hasEnteredSelection, setHasEnteredSelection] = useState(
() => preferredIndex !== null
);
const isActivelySelected =
preferredIndex !== null &&
preferredIdx !== -1 &&
!isGenerating &&
visibleResponses.length > 1;
// Trigger the slide-out animation one frame after entering selection mode
useEffect(() => {
if (!showSelectionMode) {
setSelectionEntered(false);
if (isActivelySelected) setHasEnteredSelection(true);
}, [isActivelySelected]);
// Use the selection layout once a preferred response has been chosen,
// even after deselect. Only fall through to generation layout before
// the first selection or during active streaming.
const showSelectionMode = isActivelySelected || hasEnteredSelection;
// Trigger the slide-out animation one frame after a preferred panel is selected.
// Uses isActivelySelected (not showSelectionMode) so re-selecting after a
// deselect still triggers the animation.
useEffect(() => {
if (!isActivelySelected) {
// Don't reset selectionEntered here — handleDeselectPreferred manages it
return;
}
const raf = requestAnimationFrame(() => setSelectionEntered(true));
return () => cancelAnimationFrame(raf);
}, [showSelectionMode]);
}, [isActivelySelected]);
// Build panel props — isHidden reflects actual hidden state
const buildPanelProps = useCallback(
@@ -231,6 +393,7 @@ export default function MultiModelResponseView({
isHidden: hiddenPanels.has(response.modelIndex),
isNonPreferredInSelection: isNonPreferred,
onSelect: () => handleSelectPreferred(response.modelIndex),
onDeselect: handleDeselectPreferred,
onToggleVisibility: () => toggleVisibility(response.modelIndex),
agentMessageProps: {
rawPackets: response.packets,
@@ -255,6 +418,7 @@ export default function MultiModelResponseView({
preferredIndex,
hiddenPanels,
handleSelectPreferred,
handleDeselectPreferred,
toggleVisibility,
chatState,
llmManager,
@@ -310,25 +474,30 @@ export default function MultiModelResponseView({
<div
ref={trackContainerRef}
className="w-full overflow-hidden"
style={{
maskImage: `linear-gradient(to right, transparent 0px, black ${PEEK_W}px, black calc(100% - ${PEEK_W}px), transparent 100%)`,
WebkitMaskImage: `linear-gradient(to right, transparent 0px, black ${PEEK_W}px, black calc(100% - ${PEEK_W}px), transparent 100%)`,
}}
style={
isActivelySelected
? {
maskImage: `linear-gradient(to right, transparent 0px, black ${PEEK_W}px, black calc(100% - ${PEEK_W}px), transparent 100%)`,
WebkitMaskImage: `linear-gradient(to right, transparent 0px, black ${PEEK_W}px, black calc(100% - ${PEEK_W}px), transparent 100%)`,
}
: undefined
}
>
<div
className="flex items-start"
style={{
gap: `${PANEL_GAP}px`,
transition: selectionEntered
? "transform 0.45s cubic-bezier(0.2, 0, 0, 1)"
: "none",
transition:
selectionEntered || selectionExiting
? "transform 0.45s cubic-bezier(0.2, 0, 0, 1)"
: "none",
transform: trackTransform,
}}
>
{responses.map((r, i) => {
const isHidden = hiddenPanels.has(r.modelIndex);
const isPref = r.modelIndex === preferredIndex;
const isNonPref = !isHidden && !isPref;
const isNonPref = !isHidden && !isPref && preferredIndex !== null;
const finalW = selectionWidths[i]!;
const startW = isHidden ? HIDDEN_PANEL_W : SELECTION_PANEL_W;
const capped = isNonPref && preferredPanelHeight != null;
@@ -337,6 +506,11 @@ export default function MultiModelResponseView({
<div
key={r.modelIndex}
ref={(el) => {
if (el) {
panelElsRef.current.set(r.modelIndex, el);
} else {
panelElsRef.current.delete(r.modelIndex);
}
if (isPref) preferredPanelRef(el);
if (capped && el) {
const doesOverflow = el.scrollHeight > el.clientHeight;
@@ -353,9 +527,10 @@ export default function MultiModelResponseView({
style={{
width: `${selectionEntered ? finalW : startW}px`,
flexShrink: 0,
transition: selectionEntered
? "width 0.45s cubic-bezier(0.2, 0, 0, 1)"
: "none",
transition:
selectionEntered || selectionExiting
? "width 0.45s cubic-bezier(0.2, 0, 0, 1)"
: "none",
maxHeight: capped ? preferredPanelHeight : undefined,
overflow: capped ? "hidden" : undefined,
position: capped ? "relative" : undefined,
@@ -388,7 +563,7 @@ export default function MultiModelResponseView({
return (
<div className="overflow-x-auto">
<div className="flex gap-6 items-start w-full">
<div className="flex gap-6 items-start justify-center w-full">
{responses.map((r) => {
const isHidden = hiddenPanels.has(r.modelIndex);
return (

View File

@@ -18,7 +18,7 @@ import {
isRecommendedModel,
} from "@/app/craft/onboarding/constants";
import { ToggleWarningModal } from "./ToggleWarningModal";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getModelIcon } from "@/lib/llmConfig";
import { Section } from "@/layouts/general-layouts";
import {
Accordion,

View File

@@ -48,7 +48,7 @@ import NotAllowedModal from "@/app/craft/onboarding/components/NotAllowedModal";
import { useOnboarding } from "@/app/craft/onboarding/BuildOnboardingProvider";
import { useLLMProviders } from "@/hooks/useLLMProviders";
import { useUser } from "@/providers/UserProvider";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getModelIcon } from "@/lib/llmConfig";
import {
getBuildUserPersona,
getPersonaInfo,

View File

@@ -1,5 +1,5 @@
:root {
--app-page-main-content-width: 52.5rem;
--app-page-main-content-width: 45rem;
--block-width-form-input-min: 10rem;
--container-sm: 42rem;

View File

@@ -45,6 +45,9 @@ import { personaIncludesRetrieval } from "@/app/app/services/lib";
import { useQueryController } from "@/providers/QueryControllerProvider";
import { eeGated } from "@/ce";
import EESearchUI from "@/ee/sections/SearchUI";
import useMultiModelChat from "@/hooks/useMultiModelChat";
import ModelSelector from "@/refresh-components/popovers/ModelSelector";
import { Section } from "@/layouts/general-layouts";
const SearchUI = eeGated(EESearchUI);
@@ -105,6 +108,20 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
// If no LLM provider is configured (e.g., fresh signup), the input bar is
// disabled and a "Set up an LLM" button is shown (see bottom of component).
const llmManager = useLlmManager(undefined, liveAgent ?? undefined);
const multiModel = useMultiModelChat(llmManager);
// Sync single-model selection to llmManager so the submission path
// uses the correct provider/version (mirrors AppPage behaviour).
useEffect(() => {
if (multiModel.selectedModels.length === 1) {
const model = multiModel.selectedModels[0]!;
llmManager.updateCurrentLlm({
name: model.name,
provider: model.provider,
modelName: model.modelName,
});
}
}, [multiModel.selectedModels]);
// Deep research toggle
const { deepResearchEnabled, toggleDeepResearch } = useDeepResearchToggle({
@@ -295,12 +312,17 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
// If we already have messages (chat session started), always use chat mode
// (matches AppPage behavior where existing sessions bypass classification)
const selectedModels = multiModel.isMultiModelActive
? multiModel.selectedModels
: undefined;
if (hasMessages) {
onSubmit({
message: submittedMessage,
currentMessageFiles: currentMessageFiles,
deepResearch: deepResearchEnabled,
additionalContext,
selectedModels,
});
return;
}
@@ -312,6 +334,7 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
currentMessageFiles: currentMessageFiles,
deepResearch: deepResearchEnabled,
additionalContext,
selectedModels,
});
};
@@ -328,6 +351,8 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
submitQuery,
tabReadingEnabled,
currentTabUrl,
multiModel.isMultiModelActive,
multiModel.selectedModels,
]
);
@@ -456,6 +481,7 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
onResubmit={handleResubmitLastMessage}
deepResearchEnabled={deepResearchEnabled}
anchorNodeId={anchorNodeId}
selectedModels={multiModel.selectedModels}
/>
</ChatScrollContainer>
</>
@@ -464,7 +490,23 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
{/* Welcome message - centered when no messages and not in search mode */}
{!hasMessages && !isSearch && (
<div className="relative w-full flex-1 flex flex-col items-center justify-end">
<WelcomeMessage isDefaultAgent />
<Section
flexDirection="row"
justifyContent="between"
alignItems="end"
className="max-w-[var(--app-page-main-content-width)]"
>
<WelcomeMessage isDefaultAgent />
{liveAgent && !llmManager.isLoadingProviders && (
<ModelSelector
llmManager={llmManager}
selectedModels={multiModel.selectedModels}
onAdd={multiModel.addModel}
onRemove={multiModel.removeModel}
onReplace={multiModel.replaceModel}
/>
)}
</Section>
<Spacer rem={1.5} />
</div>
)}
@@ -478,6 +520,17 @@ export default function NRFPage({ isSidePanel = false }: NRFPageProps) {
"max-w-[var(--app-page-main-content-width)] px-4"
)}
>
{hasMessages && liveAgent && !llmManager.isLoadingProviders && (
<div className="pb-1">
<ModelSelector
llmManager={llmManager}
selectedModels={multiModel.selectedModels}
onAdd={multiModel.addModel}
onRemove={multiModel.removeModel}
onReplace={multiModel.replaceModel}
/>
</div>
)}
<AppInputBar
ref={chatInputBarRef}
deepResearchEnabled={deepResearchEnabled}

View File

@@ -3,7 +3,7 @@
import { useMemo } from "react";
import { parseLlmDescriptor, structureValue } from "@/lib/llmConfig/utils";
import { DefaultModel, LLMProviderDescriptor } from "@/interfaces/llm";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getModelIcon } from "@/lib/llmConfig";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import { createIcon } from "@/components/icons/icons";

View File

@@ -144,7 +144,9 @@ export function useAdminLLMProviders() {
*/
export function useWellKnownLLMProvider(providerName: LLMProviderName) {
const { data, error, isLoading } = useSWR<WellKnownLLMProviderDescriptor>(
providerName ? SWR_KEYS.wellKnownLlmProvider(providerName) : null,
providerName && providerName !== LLMProviderName.CUSTOM
? SWR_KEYS.wellKnownLlmProvider(providerName)
: null,
errorHandlingFetcher,
{
revalidateOnFocus: false,

View File

@@ -1,6 +1,6 @@
"use client";
import { useState, useCallback, useEffect, useMemo, useRef } from "react";
import { useState, useCallback, useMemo } from "react";
import {
MAX_MODELS,
SelectedModel,
@@ -40,7 +40,6 @@ export default function useMultiModelChat(
llmManager: LlmManager
): UseMultiModelChatReturn {
const [selectedModels, setSelectedModels] = useState<SelectedModel[]>([]);
const [defaultInitialized, setDefaultInitialized] = useState(false);
// Initialize with the default model from llmManager once providers load
const llmOptions = useMemo(
@@ -49,89 +48,99 @@ export default function useMultiModelChat(
[llmManager.llmProviders]
);
// Sync selectedModels[0] with llmManager.currentLlm when in single-model
// mode. This handles both initial load and session override changes (e.g.
// page reload restores the persisted model after providers load).
// Skip when user has manually added multiple models (multi-model mode).
const selectedModelsRef = useRef(selectedModels);
selectedModelsRef.current = selectedModels;
useEffect(() => {
if (llmOptions.length === 0) return;
// In single-model mode, derive the displayed model directly from
// llmManager.currentLlm so it always stays in sync (no stale state).
// Only use the selectedModels state array when the user has manually
// added multiple models (multi-model mode).
const currentLlmModel = useMemo((): SelectedModel | null => {
if (llmOptions.length === 0) return null;
const { currentLlm } = llmManager;
if (!currentLlm.modelName) return;
const current = selectedModelsRef.current;
// Don't override multi-model selections
if (current.length > 1) return;
// Skip if already showing the correct model
if (
current.length === 1 &&
current[0]!.provider === currentLlm.provider &&
current[0]!.modelName === currentLlm.modelName
) {
return;
}
if (!currentLlm.modelName) return null;
const match = llmOptions.find(
(opt) =>
opt.provider === currentLlm.provider &&
opt.modelName === currentLlm.modelName
);
if (match) {
setSelectedModels([
{
name: match.name,
provider: match.provider,
modelName: match.modelName,
displayName: match.displayName,
},
]);
setDefaultInitialized(true);
}
if (!match) return null;
return {
name: match.name,
provider: match.provider,
modelName: match.modelName,
displayName: match.displayName,
};
}, [llmOptions, llmManager.currentLlm]);
const isMultiModelActive = selectedModels.length > 1;
const addModel = useCallback((model: SelectedModel) => {
setSelectedModels((prev) => {
if (prev.length >= MAX_MODELS) return prev;
if (
prev.some(
(m) =>
m.provider === model.provider && m.modelName === model.modelName
)
) {
return prev;
}
return [...prev, model];
});
}, []);
// Expose the effective selection: multi-model state when active,
// otherwise the single model derived from llmManager.
const effectiveSelectedModels = useMemo(
() =>
isMultiModelActive
? selectedModels
: currentLlmModel
? [currentLlmModel]
: [],
[isMultiModelActive, selectedModels, currentLlmModel]
);
const addModel = useCallback(
(model: SelectedModel) => {
setSelectedModels((prev) => {
// When in effective single-model mode (prev <= 1), always re-seed from
// the current derived model so stale state from a prior remove doesn't persist.
const base =
prev.length <= 1 && currentLlmModel ? [currentLlmModel] : prev;
if (base.length >= MAX_MODELS) return base;
if (
base.some(
(m) =>
m.provider === model.provider && m.modelName === model.modelName
)
) {
return base;
}
return [...base, model];
});
},
[currentLlmModel]
);
const removeModel = useCallback((index: number) => {
setSelectedModels((prev) => prev.filter((_, i) => i !== index));
}, []);
const replaceModel = useCallback((index: number, model: SelectedModel) => {
setSelectedModels((prev) => {
// Don't replace with a model that's already selected elsewhere
if (
prev.some(
(m, i) =>
i !== index &&
m.provider === model.provider &&
m.modelName === model.modelName
)
) {
return prev;
const replaceModel = useCallback(
(index: number, model: SelectedModel) => {
// In single-model mode, update llmManager directly so currentLlm
// (and thus effectiveSelectedModels) reflects the change immediately.
if (!isMultiModelActive) {
llmManager.updateCurrentLlm({
name: model.name,
provider: model.provider,
modelName: model.modelName,
});
return;
}
const next = [...prev];
next[index] = model;
return next;
});
}, []);
setSelectedModels((prev) => {
// Don't replace with a model that's already selected elsewhere
if (
prev.some(
(m, i) =>
i !== index &&
m.provider === model.provider &&
m.modelName === model.modelName
)
) {
return prev;
}
const next = [...prev];
next[index] = model;
return next;
});
},
[isMultiModelActive, llmManager]
);
const clearModels = useCallback(() => {
setSelectedModels([]);
@@ -161,7 +170,6 @@ export default function useMultiModelChat(
}
if (restored.length >= 2) {
setSelectedModels(restored.slice(0, MAX_MODELS));
setDefaultInitialized(true);
}
},
[llmOptions]
@@ -191,15 +199,15 @@ export default function useMultiModelChat(
);
const buildLlmOverrides = useCallback((): LLMOverride[] => {
return selectedModels.map((m) => ({
return effectiveSelectedModels.map((m) => ({
model_provider: m.name,
model_version: m.modelName,
display_name: m.displayName,
}));
}, [selectedModels]);
}, [effectiveSelectedModels]);
return {
selectedModels,
selectedModels: effectiveSelectedModels,
isMultiModelActive,
addModel,
removeModel,

View File

@@ -671,7 +671,8 @@ export function useLlmManager(
const [userHasManuallyOverriddenLLM, setUserHasManuallyOverriddenLLM] =
useState(false);
const [chatSession, setChatSession] = useState<ChatSession | null>(null);
const [currentLlm, setCurrentLlm] = useState<LlmDescriptor>({
// Manual override value — only used when userHasManuallyOverriddenLLM is true
const [manualLlm, setManualLlm] = useState<LlmDescriptor>({
name: "",
provider: "",
modelName: "",
@@ -693,55 +694,77 @@ export function useLlmManager(
prevAgentIdRef.current = liveAgent?.id;
}, [liveAgent?.id]);
const llmUpdate = () => {
/* Should be called when the live assistant or current chat session changes */
// Don't update if providers haven't loaded yet (undefined/null)
// Empty arrays are valid (user has no provider access for this assistant)
if (llmProviders === undefined || llmProviders === null) {
return;
}
// separate function so we can `return` to break out
const _llmUpdate = () => {
// if the user has overridden in this session and just switched to a brand
// new session, use their manually specified model
if (userHasManuallyOverriddenLLM && !currentChatSession) {
return;
}
if (currentChatSession?.current_alternate_model) {
setCurrentLlm(
getValidLlmDescriptor(currentChatSession.current_alternate_model)
);
} else if (liveAgent?.llm_model_version_override) {
setCurrentLlm(
getValidLlmDescriptor(liveAgent.llm_model_version_override)
);
} else if (userHasManuallyOverriddenLLM) {
// if the user has an override and there's nothing special about the
// current chat session, use the override
return;
} else if (user?.preferences?.default_model) {
setCurrentLlm(getValidLlmDescriptor(user.preferences.default_model));
} else {
const defaultLlm = getDefaultLlmDescriptor(llmProviders, defaultText);
if (defaultLlm) {
setCurrentLlm(defaultLlm);
}
}
};
_llmUpdate();
setChatSession(currentChatSession || null);
};
function getValidLlmDescriptor(
modelName: string | null | undefined
): LlmDescriptor {
return getValidLlmDescriptorForProviders(modelName, llmProviders);
}
// Compute the resolved LLM synchronously so it's never one render behind.
// This replaces the old llmUpdate() effect for model resolution.
// Wrapped with a ref for referential stability — returns the same object
// when the resolved name/provider/modelName haven't actually changed,
// preventing unnecessary re-creation of downstream callbacks (e.g. onSubmit).
const prevLlmRef = useRef<LlmDescriptor>({
name: "",
provider: "",
modelName: "",
});
const currentLlm = useMemo((): LlmDescriptor => {
let resolved: LlmDescriptor;
if (llmProviders === undefined || llmProviders === null) {
resolved = manualLlm;
} else if (userHasManuallyOverriddenLLM && !currentChatSession) {
// User has overridden in this session and switched to a new session
resolved = manualLlm;
} else if (currentChatSession?.current_alternate_model) {
resolved = getValidLlmDescriptorForProviders(
currentChatSession.current_alternate_model,
llmProviders
);
} else if (liveAgent?.llm_model_version_override) {
resolved = getValidLlmDescriptorForProviders(
liveAgent.llm_model_version_override,
llmProviders
);
} else if (userHasManuallyOverriddenLLM) {
resolved = manualLlm;
} else if (user?.preferences?.default_model) {
resolved = getValidLlmDescriptorForProviders(
user.preferences.default_model,
llmProviders
);
} else {
resolved =
getDefaultLlmDescriptor(llmProviders, defaultText) ?? manualLlm;
}
const prev = prevLlmRef.current;
if (
prev.name === resolved.name &&
prev.provider === resolved.provider &&
prev.modelName === resolved.modelName
) {
return prev;
}
prevLlmRef.current = resolved;
return resolved;
}, [
llmProviders,
defaultText,
currentChatSession,
liveAgent?.llm_model_version_override,
userHasManuallyOverriddenLLM,
manualLlm,
user?.preferences?.default_model,
]);
// Keep chatSession state in sync (used by temperature effect)
useEffect(() => {
setChatSession(currentChatSession || null);
}, [currentChatSession]);
const [imageFilesPresent, setImageFilesPresent] = useState(false);
const updateImageFilesPresent = (present: boolean) => {
@@ -750,18 +773,18 @@ export function useLlmManager(
// Manually set the LLM
const updateCurrentLlm = (newLlm: LlmDescriptor) => {
setCurrentLlm(newLlm);
setManualLlm(newLlm);
setUserHasManuallyOverriddenLLM(true);
};
const updateCurrentLlmToModelName = (modelName: string) => {
setCurrentLlm(getValidLlmDescriptor(modelName));
setManualLlm(getValidLlmDescriptor(modelName));
setUserHasManuallyOverriddenLLM(true);
};
const updateModelOverrideBasedOnChatSession = (chatSession?: ChatSession) => {
if (chatSession && chatSession.current_alternate_model?.length > 0) {
setCurrentLlm(getValidLlmDescriptor(chatSession.current_alternate_model));
setManualLlm(getValidLlmDescriptor(chatSession.current_alternate_model));
}
};
@@ -811,8 +834,6 @@ export function useLlmManager(
}, [currentLlm]);
useEffect(() => {
llmUpdate();
if (!chatSession && currentChatSession) {
if (temperature) {
updateTemperatureOverrideForChatSession(

View File

@@ -0,0 +1,251 @@
import type { IconFunctionComponent } from "@opal/types";
import { SvgCpu, SvgPlug, SvgServer } from "@opal/icons";
import {
SvgBifrost,
SvgOpenai,
SvgClaude,
SvgOllama,
SvgAws,
SvgOpenrouter,
SvgAzure,
SvgGemini,
SvgLitellm,
SvgLmStudio,
SvgMicrosoft,
SvgMistral,
SvgDeepseek,
SvgQwen,
SvgGoogle,
} from "@opal/logos";
import { ZAIIcon } from "@/components/icons/icons";
import { LLMProviderFormProps, LLMProviderName } from "@/interfaces/llm";
import type { LLMProviderView } from "@/interfaces/llm";
import OpenAIModal from "@/sections/modals/llmConfig/OpenAIModal";
import AnthropicModal from "@/sections/modals/llmConfig/AnthropicModal";
import OllamaModal from "@/sections/modals/llmConfig/OllamaModal";
import AzureModal from "@/sections/modals/llmConfig/AzureModal";
import BedrockModal from "@/sections/modals/llmConfig/BedrockModal";
import VertexAIModal from "@/sections/modals/llmConfig/VertexAIModal";
import OpenRouterModal from "@/sections/modals/llmConfig/OpenRouterModal";
import CustomModal from "@/sections/modals/llmConfig/CustomModal";
import LMStudioModal from "@/sections/modals/llmConfig/LMStudioModal";
import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
import BifrostModal from "@/sections/modals/llmConfig/BifrostModal";
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
// ─── Text (LLM) providers ────────────────────────────────────────────────────
/**
 * UI metadata for a single LLM provider: the branding icon, the names shown
 * to users, and the modal component used to create/edit the provider.
 */
export interface ProviderEntry {
  // Branding icon rendered next to the provider in pickers/lists
  icon: IconFunctionComponent;
  // Product/brand name shown to users (e.g. "GPT", "Claude")
  productName: string;
  // Vendor/company name (e.g. "OpenAI", "Anthropic")
  companyName: string;
  // Configuration modal rendered when setting up or editing this provider
  Modal: React.ComponentType<LLMProviderFormProps>;
}
// Registry of known LLM providers keyed by LLMProviderName. Providers not
// listed here fall back to DEFAULT_ENTRY via getProvider(), which also fills
// in the raw provider name as product/company name.
const PROVIDERS: Record<string, ProviderEntry> = {
  [LLMProviderName.OPENAI]: {
    icon: SvgOpenai,
    productName: "GPT",
    companyName: "OpenAI",
    Modal: OpenAIModal,
  },
  [LLMProviderName.ANTHROPIC]: {
    icon: SvgClaude,
    productName: "Claude",
    companyName: "Anthropic",
    Modal: AnthropicModal,
  },
  [LLMProviderName.VERTEX_AI]: {
    icon: SvgGemini,
    productName: "Gemini",
    companyName: "Google Cloud Vertex AI",
    Modal: VertexAIModal,
  },
  [LLMProviderName.BEDROCK]: {
    icon: SvgAws,
    productName: "Amazon Bedrock",
    companyName: "AWS",
    Modal: BedrockModal,
  },
  [LLMProviderName.AZURE]: {
    icon: SvgAzure,
    productName: "Azure OpenAI",
    companyName: "Microsoft Azure",
    Modal: AzureModal,
  },
  // LITELLM and LITELLM_PROXY are distinct entries: the former uses the
  // generic CustomModal, the latter a dedicated proxy modal.
  [LLMProviderName.LITELLM]: {
    icon: SvgLitellm,
    productName: "LiteLLM",
    companyName: "LiteLLM",
    Modal: CustomModal,
  },
  [LLMProviderName.LITELLM_PROXY]: {
    icon: SvgLitellm,
    productName: "LiteLLM Proxy",
    companyName: "LiteLLM Proxy",
    Modal: LiteLLMProxyModal,
  },
  [LLMProviderName.OLLAMA_CHAT]: {
    icon: SvgOllama,
    productName: "Ollama",
    companyName: "Ollama",
    Modal: OllamaModal,
  },
  [LLMProviderName.OPENROUTER]: {
    icon: SvgOpenrouter,
    productName: "OpenRouter",
    companyName: "OpenRouter",
    Modal: OpenRouterModal,
  },
  [LLMProviderName.LM_STUDIO]: {
    icon: SvgLmStudio,
    productName: "LM Studio",
    companyName: "LM Studio",
    Modal: LMStudioModal,
  },
  [LLMProviderName.BIFROST]: {
    icon: SvgBifrost,
    productName: "Bifrost",
    companyName: "Bifrost",
    Modal: BifrostModal,
  },
  [LLMProviderName.OPENAI_COMPATIBLE]: {
    icon: SvgPlug,
    productName: "OpenAI-Compatible",
    companyName: "OpenAI-Compatible",
    Modal: OpenAICompatibleModal,
  },
  [LLMProviderName.CUSTOM]: {
    icon: SvgServer,
    productName: "Custom Models",
    companyName: "models from other LiteLLM-compatible providers",
    Modal: CustomModal,
  },
};
// Fallback entry for providers not present in PROVIDERS. getProvider()
// substitutes the raw provider name for the empty product/company names.
const DEFAULT_ENTRY: ProviderEntry = {
  icon: SvgCpu,
  productName: "",
  companyName: "",
  Modal: CustomModal,
};
// Providers that don't use custom_config themselves — if custom_config is
// present on one of these, it means the provider was originally created via
// CustomModal, so getProvider() routes editing back to CustomModal.
const CUSTOM_CONFIG_OVERRIDES = new Set<string>([
  LLMProviderName.OPENAI,
  LLMProviderName.ANTHROPIC,
  LLMProviderName.AZURE,
  LLMProviderName.OPENROUTER,
]);
/**
 * Resolve the ProviderEntry (icon, display names, config modal) for a
 * provider name.
 *
 * Unknown providers receive a generic entry labelled with the raw provider
 * name. Providers that normally have no custom_config but carry one anyway
 * (i.e. were originally created through the custom flow) are routed back to
 * CustomModal for editing.
 *
 * @param providerName - Provider identifier to look up.
 * @param existingProvider - Optional stored provider, used to detect the
 *   custom-flow override via its custom_config field.
 */
export function getProvider(
  providerName: string,
  existingProvider?: LLMProviderView
): ProviderEntry {
  const known = PROVIDERS[providerName];
  const entry: ProviderEntry = known ?? {
    ...DEFAULT_ENTRY,
    productName: providerName,
    companyName: providerName,
  };

  const createdViaCustomFlow =
    CUSTOM_CONFIG_OVERRIDES.has(providerName) &&
    existingProvider?.custom_config != null;

  return createdViaCustomFlow ? { ...entry, Modal: CustomModal } : entry;
}
// ─── Aggregator providers ────────────────────────────────────────────────────
// Providers that host models from multiple vendors (e.g. Bedrock hosts Claude,
// Llama, etc.) Used by the model-icon resolver to prioritise vendor icons
// derived from the model name over the aggregator's own icon.
// NOTE(review): compared against a lowercased provider name in getModelIcon;
// "bedrock_converse" appears to be a raw provider string with no
// LLMProviderName member — confirm against the enum definition.
export const AGGREGATOR_PROVIDERS = new Set([
  LLMProviderName.BEDROCK,
  "bedrock_converse",
  LLMProviderName.OPENROUTER,
  LLMProviderName.OLLAMA_CHAT,
  LLMProviderName.LM_STUDIO,
  LLMProviderName.LITELLM_PROXY,
  LLMProviderName.BIFROST,
  LLMProviderName.OPENAI_COMPATIBLE,
  LLMProviderName.VERTEX_AI,
]);
// ─── Model-aware icon resolver ───────────────────────────────────────────────
// Maps lowercase provider ids and vendor-name substrings to icons. getModelIcon
// scans these entries in insertion order with a substring match against the
// model name, so key order is behavioural: do not reorder keys casually.
// NOTE(review): "llama" precedes "ollama", so a model name containing "ollama"
// would match "llama" (SvgCpu) first — confirm this ordering is intentional.
const MODEL_ICON_MAP: Record<string, IconFunctionComponent> = {
  [LLMProviderName.OPENAI]: SvgOpenai,
  [LLMProviderName.ANTHROPIC]: SvgClaude,
  [LLMProviderName.OLLAMA_CHAT]: SvgOllama,
  [LLMProviderName.LM_STUDIO]: SvgLmStudio,
  [LLMProviderName.OPENROUTER]: SvgOpenrouter,
  [LLMProviderName.VERTEX_AI]: SvgGemini,
  [LLMProviderName.BEDROCK]: SvgAws,
  [LLMProviderName.LITELLM_PROXY]: SvgLitellm,
  [LLMProviderName.BIFROST]: SvgBifrost,
  [LLMProviderName.OPENAI_COMPATIBLE]: SvgPlug,
  // Vendor-name substrings matched against model names:
  amazon: SvgAws,
  phi: SvgMicrosoft,
  mistral: SvgMistral,
  ministral: SvgMistral,
  llama: SvgCpu,
  ollama: SvgOllama,
  gemini: SvgGemini,
  deepseek: SvgDeepseek,
  claude: SvgClaude,
  azure: SvgAzure,
  microsoft: SvgMicrosoft,
  meta: SvgCpu,
  google: SvgGoogle,
  qwen: SvgQwen,
  qwq: SvgQwen,
  zai: ZAIIcon,
  // Legacy/alternate Bedrock provider id — kept alongside the enum entry.
  bedrock_converse: SvgAws,
};
/**
 * Model-aware icon resolver that checks both provider name and model name
 * to pick the most specific icon (e.g. Claude icon for a Bedrock Claude model).
 *
 * Resolution order:
 *   1. For aggregator providers, the vendor icon inferred from the model name.
 *   2. A direct provider-name match in MODEL_ICON_MAP.
 *   3. For all other providers, the model-name substring match.
 *   4. The generic CPU icon.
 */
export function getModelIcon(
  providerName: string,
  modelName?: string
): IconFunctionComponent {
  const provider = providerName.toLowerCase();
  // First MODEL_ICON_MAP entry whose key is a substring of the model name
  // (insertion order of the map decides ties).
  const iconForModel = (model: string): IconFunctionComponent | undefined => {
    const lowered = model.toLowerCase();
    const hit = Object.entries(MODEL_ICON_MAP).find(([key]) =>
      lowered.includes(key)
    );
    return hit?.[1];
  };
  // Aggregators host many vendors' models, so the model name wins first.
  if (modelName && AGGREGATOR_PROVIDERS.has(provider)) {
    const vendorIcon = iconForModel(modelName);
    if (vendorIcon) {
      return vendorIcon;
    }
  }
  // Direct provider-name match.
  const providerIcon = MODEL_ICON_MAP[provider];
  if (providerIcon) {
    return providerIcon;
  }
  // Non-aggregator providers: fall back to the model-name scan last.
  if (modelName) {
    const vendorIcon = iconForModel(modelName);
    if (vendorIcon) {
      return vendorIcon;
    }
  }
  // Nothing matched — generic CPU fallback.
  return SvgCpu;
}

View File

@@ -1,176 +0,0 @@
import type { IconFunctionComponent } from "@opal/types";
import { SvgCpu, SvgPlug, SvgServer } from "@opal/icons";
import {
SvgBifrost,
SvgOpenai,
SvgClaude,
SvgOllama,
SvgAws,
SvgOpenrouter,
SvgAzure,
SvgGemini,
SvgLitellm,
SvgLmStudio,
SvgMicrosoft,
SvgMistral,
SvgDeepseek,
SvgQwen,
SvgGoogle,
} from "@opal/logos";
import { ZAIIcon } from "@/components/icons/icons";
import { LLMProviderName } from "@/interfaces/llm";
export const AGGREGATOR_PROVIDERS = new Set([
LLMProviderName.BEDROCK,
"bedrock_converse",
LLMProviderName.OPENROUTER,
LLMProviderName.OLLAMA_CHAT,
LLMProviderName.LM_STUDIO,
LLMProviderName.LITELLM_PROXY,
LLMProviderName.BIFROST,
LLMProviderName.OPENAI_COMPATIBLE,
LLMProviderName.VERTEX_AI,
]);
const PROVIDER_ICONS: Record<string, IconFunctionComponent> = {
[LLMProviderName.OPENAI]: SvgOpenai,
[LLMProviderName.ANTHROPIC]: SvgClaude,
[LLMProviderName.VERTEX_AI]: SvgGemini,
[LLMProviderName.BEDROCK]: SvgAws,
[LLMProviderName.AZURE]: SvgAzure,
[LLMProviderName.LITELLM]: SvgLitellm,
[LLMProviderName.LITELLM_PROXY]: SvgLitellm,
[LLMProviderName.OLLAMA_CHAT]: SvgOllama,
[LLMProviderName.OPENROUTER]: SvgOpenrouter,
[LLMProviderName.LM_STUDIO]: SvgLmStudio,
[LLMProviderName.BIFROST]: SvgBifrost,
[LLMProviderName.OPENAI_COMPATIBLE]: SvgPlug,
// fallback
[LLMProviderName.CUSTOM]: SvgServer,
};
const PROVIDER_PRODUCT_NAMES: Record<string, string> = {
[LLMProviderName.OPENAI]: "GPT",
[LLMProviderName.ANTHROPIC]: "Claude",
[LLMProviderName.VERTEX_AI]: "Gemini",
[LLMProviderName.BEDROCK]: "Amazon Bedrock",
[LLMProviderName.AZURE]: "Azure OpenAI",
[LLMProviderName.LITELLM]: "LiteLLM",
[LLMProviderName.LITELLM_PROXY]: "LiteLLM Proxy",
[LLMProviderName.OLLAMA_CHAT]: "Ollama",
[LLMProviderName.OPENROUTER]: "OpenRouter",
[LLMProviderName.LM_STUDIO]: "LM Studio",
[LLMProviderName.BIFROST]: "Bifrost",
[LLMProviderName.OPENAI_COMPATIBLE]: "OpenAI Compatible",
// fallback
[LLMProviderName.CUSTOM]: "Custom Models",
};
const PROVIDER_DISPLAY_NAMES: Record<string, string> = {
[LLMProviderName.OPENAI]: "OpenAI",
[LLMProviderName.ANTHROPIC]: "Anthropic",
[LLMProviderName.VERTEX_AI]: "Google Cloud Vertex AI",
[LLMProviderName.BEDROCK]: "AWS",
[LLMProviderName.AZURE]: "Microsoft Azure",
[LLMProviderName.LITELLM]: "LiteLLM",
[LLMProviderName.LITELLM_PROXY]: "LiteLLM Proxy",
[LLMProviderName.OLLAMA_CHAT]: "Ollama",
[LLMProviderName.OPENROUTER]: "OpenRouter",
[LLMProviderName.LM_STUDIO]: "LM Studio",
[LLMProviderName.BIFROST]: "Bifrost",
[LLMProviderName.OPENAI_COMPATIBLE]: "OpenAI Compatible",
// fallback
[LLMProviderName.CUSTOM]: "Other providers or self-hosted",
};
export function getProviderProductName(providerName: string): string {
return PROVIDER_PRODUCT_NAMES[providerName] ?? providerName;
}
export function getProviderDisplayName(providerName: string): string {
return PROVIDER_DISPLAY_NAMES[providerName] ?? providerName;
}
export function getProviderIcon(providerName: string): IconFunctionComponent {
return PROVIDER_ICONS[providerName] ?? SvgCpu;
}
// ---------------------------------------------------------------------------
// Model-aware icon resolver (legacy icon set)
// ---------------------------------------------------------------------------
const MODEL_ICON_MAP: Record<string, IconFunctionComponent> = {
[LLMProviderName.OPENAI]: SvgOpenai,
[LLMProviderName.ANTHROPIC]: SvgClaude,
[LLMProviderName.OLLAMA_CHAT]: SvgOllama,
[LLMProviderName.LM_STUDIO]: SvgLmStudio,
[LLMProviderName.OPENROUTER]: SvgOpenrouter,
[LLMProviderName.VERTEX_AI]: SvgGemini,
[LLMProviderName.BEDROCK]: SvgAws,
[LLMProviderName.LITELLM_PROXY]: SvgLitellm,
[LLMProviderName.BIFROST]: SvgBifrost,
[LLMProviderName.OPENAI_COMPATIBLE]: SvgPlug,
amazon: SvgAws,
phi: SvgMicrosoft,
mistral: SvgMistral,
ministral: SvgMistral,
llama: SvgCpu,
ollama: SvgOllama,
gemini: SvgGemini,
deepseek: SvgDeepseek,
claude: SvgClaude,
azure: SvgAzure,
microsoft: SvgMicrosoft,
meta: SvgCpu,
google: SvgGoogle,
qwen: SvgQwen,
qwq: SvgQwen,
zai: ZAIIcon,
bedrock_converse: SvgAws,
};
/**
* Model-aware icon resolver that checks both provider name and model name
* to pick the most specific icon (e.g. Claude icon for a Bedrock Claude model).
*/
export const getModelIcon = (
providerName: string,
modelName?: string
): IconFunctionComponent => {
const lowerProviderName = providerName.toLowerCase();
// For aggregator providers, prioritise showing the vendor icon based on model name
if (AGGREGATOR_PROVIDERS.has(lowerProviderName) && modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Check if provider name directly matches an icon
if (lowerProviderName in MODEL_ICON_MAP) {
const icon = MODEL_ICON_MAP[lowerProviderName];
if (icon) {
return icon;
}
}
// For non-aggregator providers, check if model name contains any of the keys
if (modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Fallback to CPU icon if no matches
return SvgCpu;
};

View File

@@ -44,7 +44,7 @@ export function getFinalLLM(
return [provider, model];
}
export function getLLMProviderOverrideForPersona(
export function getProviderOverrideForPersona(
liveAgent: MinimalPersonaSnapshot,
llmProviders: LLMProviderDescriptor[]
): LlmDescriptor | null {
@@ -144,7 +144,7 @@ export function getDisplayName(
agent: MinimalPersonaSnapshot,
llmProviders: LLMProviderDescriptor[]
): string | undefined {
const llmDescriptor = getLLMProviderOverrideForPersona(
const llmDescriptor = getProviderOverrideForPersona(
agent,
llmProviders ?? []
);

View File

@@ -22,7 +22,7 @@ import {
SvgImage,
SvgLoader,
SvgMoreHorizontal,
SvgPaperclip,
SvgUploadSquare,
} from "@opal/icons";
const getFileExtension = (fileName: string): string => {
const idx = fileName.lastIndexOf(".");
@@ -125,7 +125,7 @@ function FilePickerPopoverContents({
// Action button to upload more files
<LineItem
key="upload-files"
icon={SvgPaperclip}
icon={SvgUploadSquare}
description="Upload a file from your device"
onClick={triggerUploadPicker}
>

View File

@@ -4,7 +4,7 @@ import { useState, useEffect, useCallback, useMemo, useRef } from "react";
import Popover from "@/refresh-components/Popover";
import { LlmDescriptor, LlmManager } from "@/lib/hooks";
import { structureValue } from "@/lib/llmConfig/utils";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getModelIcon } from "@/lib/llmConfig";
import { AGGREGATOR_PROVIDERS } from "@/lib/llmConfig/svc";
import { Slider } from "@/components/ui/slider";

View File

@@ -3,7 +3,7 @@
import { useState, useMemo, useRef } from "react";
import Popover from "@/refresh-components/Popover";
import { LlmManager } from "@/lib/hooks";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getModelIcon } from "@/lib/llmConfig";
import { Button, SelectButton, OpenButton } from "@opal/components";
import { SvgPlusCircle, SvgX } from "@opal/icons";
import { LLMOption } from "@/refresh-components/popovers/interfaces";
@@ -104,6 +104,7 @@ export default function ModelSelector({
onRemove(existingIndex);
} else if (!atMax) {
onAdd(model);
setOpen(false);
}
};
@@ -214,15 +215,17 @@ export default function ModelSelector({
)}
</div>
<Popover.Content side="top" align="end" width="lg">
<ModelListContent
llmProviders={llmManager.llmProviders}
isLoading={llmManager.isLoadingProviders}
onSelect={handleSelect}
isSelected={isSelected}
isDisabled={isDisabled}
/>
</Popover.Content>
{!(atMax && replacingIndex === null) && (
<Popover.Content side="top" align="end" width="lg">
<ModelListContent
llmProviders={llmManager.llmProviders}
isLoading={llmManager.isLoadingProviders}
onSelect={handleSelect}
isSelected={isSelected}
isDisabled={isDisabled}
/>
</Popover.Content>
)}
</Popover>
);
}

View File

@@ -400,19 +400,22 @@ export default function AppPage({ firstMessage }: ChatPageProps) {
const multiModel = useMultiModelChat(llmManager);
// Auto-fold sidebar when multi-model is active (panels need full width)
// Auto-fold sidebar when a multi-model message is submitted.
// Stays collapsed until the user exits multi-model mode (removes models).
const { folded: sidebarFolded, setFolded: setSidebarFolded } =
useSidebarState();
const preMultiModelFoldedRef = useRef<boolean | null>(null);
useEffect(() => {
if (
multiModel.isMultiModelActive &&
preMultiModelFoldedRef.current === null
) {
const foldSidebarForMultiModel = useCallback(() => {
if (preMultiModelFoldedRef.current === null) {
preMultiModelFoldedRef.current = sidebarFolded;
setSidebarFolded(true);
} else if (
}
}, [sidebarFolded, setSidebarFolded]);
// Restore sidebar when user exits multi-model mode
useEffect(() => {
if (
!multiModel.isMultiModelActive &&
preMultiModelFoldedRef.current !== null
) {
@@ -532,6 +535,9 @@ export default function AppPage({ firstMessage }: ChatPageProps) {
const onChat = useCallback(
(message: string) => {
if (multiModel.isMultiModelActive) {
foldSidebarForMultiModel();
}
resetInputBar();
onSubmit({
message,
@@ -552,6 +558,7 @@ export default function AppPage({ firstMessage }: ChatPageProps) {
deepResearchEnabledForCurrentWorkflow,
multiModel.isMultiModelActive,
multiModel.selectedModels,
foldSidebarForMultiModel,
showOnboarding,
onboardingDismissed,
finishOnboarding,
@@ -864,13 +871,15 @@ export default function AppPage({ firstMessage }: ChatPageProps) {
agent={liveAgent}
isDefaultAgent={isDefaultAgent}
/>
<ModelSelector
llmManager={llmManager}
selectedModels={multiModel.selectedModels}
onAdd={multiModel.addModel}
onRemove={multiModel.removeModel}
onReplace={multiModel.replaceModel}
/>
{liveAgent && !llmManager.isLoadingProviders && (
<ModelSelector
llmManager={llmManager}
selectedModels={multiModel.selectedModels}
onAdd={multiModel.addModel}
onRemove={multiModel.removeModel}
onReplace={multiModel.replaceModel}
/>
)}
</Section>
<Spacer rem={1.5} />
</Fade>
@@ -936,17 +945,19 @@ export default function AppPage({ firstMessage }: ChatPageProps) {
isSearch ? "h-[14px]" : "h-0"
)}
/>
{appFocus.isChat() && (
<div className="pb-1">
<ModelSelector
llmManager={llmManager}
selectedModels={multiModel.selectedModels}
onAdd={multiModel.addModel}
onRemove={multiModel.removeModel}
onReplace={multiModel.replaceModel}
/>
</div>
)}
{appFocus.isChat() &&
liveAgent &&
!llmManager.isLoadingProviders && (
<div className="pb-1">
<ModelSelector
llmManager={llmManager}
selectedModels={multiModel.selectedModels}
onAdd={multiModel.addModel}
onRemove={multiModel.removeModel}
onReplace={multiModel.replaceModel}
/>
</div>
)}
<AppInputBar
ref={chatInputBarRef}
deepResearchEnabled={

View File

@@ -15,11 +15,7 @@ import { SvgArrowExchange, SvgSettings, SvgTrash } from "@opal/icons";
import * as SettingsLayouts from "@/layouts/settings-layouts";
import { ADMIN_ROUTES } from "@/lib/admin-routes";
import * as GeneralLayouts from "@/layouts/general-layouts";
import {
getProviderDisplayName,
getProviderIcon,
getProviderProductName,
} from "@/lib/llmConfig/providers";
import { getProvider } from "@/lib/llmConfig";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import { deleteLlmProvider, setDefaultLlmModel } from "@/lib/llmConfig/svc";
import { Horizontal as HorizontalInput } from "@/layouts/input-layouts";
@@ -33,19 +29,6 @@ import {
LLMProviderView,
WellKnownLLMProviderDescriptor,
} from "@/interfaces/llm";
import { getModalForExistingProvider } from "@/sections/modals/llmConfig/getModal";
import OpenAIModal from "@/sections/modals/llmConfig/OpenAIModal";
import AnthropicModal from "@/sections/modals/llmConfig/AnthropicModal";
import OllamaModal from "@/sections/modals/llmConfig/OllamaModal";
import AzureModal from "@/sections/modals/llmConfig/AzureModal";
import BedrockModal from "@/sections/modals/llmConfig/BedrockModal";
import VertexAIModal from "@/sections/modals/llmConfig/VertexAIModal";
import OpenRouterModal from "@/sections/modals/llmConfig/OpenRouterModal";
import CustomModal from "@/sections/modals/llmConfig/CustomModal";
import LMStudioModal from "@/sections/modals/llmConfig/LMStudioModal";
import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
import BifrostModal from "@/sections/modals/llmConfig/BifrostModal";
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
import { Section } from "@/layouts/general-layouts";
import { markdown } from "@opal/utils";
@@ -72,51 +55,6 @@ const PROVIDER_DISPLAY_ORDER: string[] = [
LLMProviderName.OPENAI_COMPATIBLE,
];
const PROVIDER_MODAL_MAP: Record<
string,
(
shouldMarkAsDefault: boolean,
onOpenChange: (open: boolean) => void
) => React.ReactNode
> = {
openai: (d, onOpenChange) => (
<OpenAIModal shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
),
anthropic: (d, onOpenChange) => (
<AnthropicModal shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
),
ollama_chat: (d, onOpenChange) => (
<OllamaModal shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
),
azure: (d, onOpenChange) => (
<AzureModal shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
),
bedrock: (d, onOpenChange) => (
<BedrockModal shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
),
vertex_ai: (d, onOpenChange) => (
<VertexAIModal shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
),
openrouter: (d, onOpenChange) => (
<OpenRouterModal shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
),
lm_studio: (d, onOpenChange) => (
<LMStudioModal shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
),
litellm_proxy: (d, onOpenChange) => (
<LiteLLMProxyModal shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
),
bifrost: (d, onOpenChange) => (
<BifrostModal shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
),
openai_compatible: (d, onOpenChange) => (
<OpenAICompatibleModal
shouldMarkAsDefault={d}
onOpenChange={onOpenChange}
/>
),
};
// ============================================================================
// ExistingProviderCard — card for configured (existing) providers
// ============================================================================
@@ -125,14 +63,12 @@ interface ExistingProviderCardProps {
provider: LLMProviderView;
isDefault: boolean;
isLastProvider: boolean;
defaultModelName?: string;
}
function ExistingProviderCard({
provider,
isDefault,
isLastProvider,
defaultModelName,
}: ExistingProviderCardProps) {
const { mutate } = useSWRConfig();
const [isOpen, setIsOpen] = useState(false);
@@ -150,8 +86,14 @@ function ExistingProviderCard({
}
};
const { icon, companyName, Modal } = getProvider(provider.provider, provider);
return (
<>
{isOpen && (
<Modal existingLlmProvider={provider} onOpenChange={setIsOpen} />
)}
{deleteModal.isOpen && (
<ConfirmationModalLayout
icon={SvgTrash}
@@ -202,9 +144,9 @@ function ExistingProviderCard({
onClick={() => setIsOpen(true)}
>
<CardLayout.Header
icon={getProviderIcon(provider.provider)}
icon={icon}
title={provider.name}
description={getProviderDisplayName(provider.provider)}
description={companyName}
sizePreset="main-ui"
variant="section"
tag={isDefault ? { title: "Default", color: "blue" } : undefined}
@@ -236,8 +178,6 @@ function ExistingProviderCard({
</div>
}
/>
{isOpen &&
getModalForExistingProvider(provider, setIsOpen, defaultModelName)}
</SelectCard>
</Hoverable.Root>
</>
@@ -251,18 +191,11 @@ function ExistingProviderCard({
interface NewProviderCardProps {
provider: WellKnownLLMProviderDescriptor;
isFirstProvider: boolean;
formFn: (
shouldMarkAsDefault: boolean,
onOpenChange: (open: boolean) => void
) => React.ReactNode;
}
function NewProviderCard({
provider,
isFirstProvider,
formFn,
}: NewProviderCardProps) {
function NewProviderCard({ provider, isFirstProvider }: NewProviderCardProps) {
const [isOpen, setIsOpen] = useState(false);
const { icon, productName, companyName, Modal } = getProvider(provider.name);
return (
<SelectCard
@@ -272,9 +205,9 @@ function NewProviderCard({
onClick={() => setIsOpen(true)}
>
<CardLayout.Header
icon={getProviderIcon(provider.name)}
title={getProviderProductName(provider.name)}
description={getProviderDisplayName(provider.name)}
icon={icon}
title={productName}
description={companyName}
sizePreset="main-ui"
variant="section"
rightChildren={
@@ -290,7 +223,9 @@ function NewProviderCard({
</Button>
}
/>
{isOpen && formFn(isFirstProvider, setIsOpen)}
{isOpen && (
<Modal shouldMarkAsDefault={isFirstProvider} onOpenChange={setIsOpen} />
)}
</SelectCard>
);
}
@@ -307,6 +242,7 @@ function NewCustomProviderCard({
isFirstProvider,
}: NewCustomProviderCardProps) {
const [isOpen, setIsOpen] = useState(false);
const { icon, productName, companyName, Modal } = getProvider("custom");
return (
<SelectCard
@@ -316,9 +252,9 @@ function NewCustomProviderCard({
onClick={() => setIsOpen(true)}
>
<CardLayout.Header
icon={getProviderIcon("custom")}
title={getProviderProductName("custom")}
description={getProviderDisplayName("custom")}
icon={icon}
title={productName}
description={companyName}
sizePreset="main-ui"
variant="section"
rightChildren={
@@ -335,10 +271,7 @@ function NewCustomProviderCard({
}
/>
{isOpen && (
<CustomModal
shouldMarkAsDefault={isFirstProvider}
onOpenChange={setIsOpen}
/>
<Modal shouldMarkAsDefault={isFirstProvider} onOpenChange={setIsOpen} />
)}
</SelectCard>
);
@@ -348,7 +281,7 @@ function NewCustomProviderCard({
// LLMConfigurationPage — main page component
// ============================================================================
export default function LLMProviderConfigurationPage() {
export default function LLMConfigurationPage() {
const { mutate } = useSWRConfig();
const { llmProviders: existingLlmProviders, defaultText } =
useAdminLLMProviders();
@@ -469,11 +402,6 @@ export default function LLMProviderConfigurationPage() {
provider={provider}
isDefault={defaultText?.provider_id === provider.id}
isLastProvider={sortedProviders.length === 1}
defaultModelName={
defaultText?.provider_id === provider.id
? defaultText.model_name
: undefined
}
/>
))}
</div>
@@ -507,23 +435,13 @@ export default function LLMProviderConfigurationPage() {
(bIndex === -1 ? Infinity : bIndex)
);
})
.map((provider) => {
const formFn = PROVIDER_MODAL_MAP[provider.name];
if (!formFn) {
toast.error(
`No modal mapping for provider "${provider.name}".`
);
return null;
}
return (
<NewProviderCard
key={provider.name}
provider={provider}
isFirstProvider={isFirstProvider}
formFn={formFn}
/>
);
})}
.map((provider) => (
<NewProviderCard
key={provider.name}
provider={provider}
isFirstProvider={isFirstProvider}
/>
))}
<NewCustomProviderCard isFirstProvider={isFirstProvider} />
</div>
</GeneralLayouts.Section>

View File

@@ -352,6 +352,7 @@ const ChatScrollContainer = React.memo(
key={sessionId}
ref={scrollContainerRef}
data-testid="chat-scroll-container"
data-chat-scroll
className={cn(
"flex flex-col flex-1 min-h-0 overflow-y-auto overflow-x-hidden",
hideScrollbar ? "no-scrollbar" : "default-scrollbar"

View File

@@ -44,8 +44,8 @@ import {
SvgGlobe,
SvgHourglass,
SvgMicrophone,
SvgPaperclip,
SvgPlus,
SvgPlusCircle,
SvgSearch,
SvgStop,
SvgX,
@@ -507,7 +507,7 @@ const AppInputBar = React.memo(
trigger={(open) => (
<Button
disabled={disabled}
icon={SvgPlusCircle}
icon={SvgPaperclip}
tooltip="Attach Files"
interaction={open ? "hover" : "rest"}
prominence="tertiary"

View File

@@ -87,7 +87,9 @@ describe("Custom LLM Provider Configuration Workflow", () => {
await user.type(nameInput, options.name);
// Select provider from the combo box dropdown
const providerInput = screen.getByPlaceholderText("Select a provider");
const providerInput = screen.getByPlaceholderText(
"Provider ID string as shown on LiteLLM"
);
await user.click(providerInput);
const providerOption = await screen.findByRole("option", {
name: new RegExp(options.provider, "i"),
@@ -524,7 +526,9 @@ describe("Custom LLM Provider Configuration Workflow", () => {
await user.type(nameInput, "Cloudflare Provider");
// Select provider from the combo box dropdown
const providerInput = screen.getByPlaceholderText("Select a provider");
const providerInput = screen.getByPlaceholderText(
"Provider ID string as shown on LiteLLM"
);
await user.click(providerInput);
const providerOption = await screen.findByRole("option", {
name: /cloudflare/i,
@@ -538,7 +542,9 @@ describe("Custom LLM Provider Configuration Workflow", () => {
await user.click(addLineButton);
// Fill in custom config key-value pair
const keyInputs = screen.getAllByRole("textbox", { name: /Key \d+/ });
const keyInputs = screen.getAllByRole("textbox", {
name: /e\.g\. OPENAI_ORGANIZATION \d+/,
});
const valueInputs = screen.getAllByRole("textbox", { name: /Value \d+/ });
await user.type(keyInputs[0]!, "CLOUDFLARE_ACCOUNT_ID");

View File

@@ -184,6 +184,7 @@ function CustomConfigKeyValue() {
return (
<KeyValueInput
items={formikProps.values.custom_config_list}
keyPlaceholder="e.g. OPENAI_ORGANIZATION"
onChange={(items) =>
formikProps.setFieldValue("custom_config_list", items)
}
@@ -213,7 +214,7 @@ function ProviderNameSelect({ disabled }: { disabled?: boolean }) {
value={values.provider}
onValueChange={(value) => setFieldValue("provider", value)}
options={options}
placeholder="Select a provider"
placeholder="Provider ID string as shown on LiteLLM"
disabled={disabled}
createPrefix="Use"
dropdownMaxHeight="60vh"
@@ -307,6 +308,7 @@ export default function CustomModal({
onClose={onClose}
initialValues={initialValues}
validationSchema={validationSchema}
description="Connect models from other LiteLLM-compatible providers."
onSubmit={async (values, { setSubmitting, setStatus }) => {
setSubmitting(true);
@@ -370,15 +372,20 @@ export default function CustomModal({
<InputLayouts.FieldPadder>
<InputLayouts.Vertical
name="provider"
title="Provider Name"
title="Provider"
subDescription={markdown(
"Should be one of the providers listed at [LiteLLM](https://docs.litellm.ai/docs/providers)."
"See full list of supported LLM providers at [LiteLLM](https://docs.litellm.ai/docs/providers)."
)}
>
<ProviderNameSelect disabled={!!existingLlmProvider} />
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
<APIKeyField
optional
subDescription="Paste your API key if your model provider requires authentication."
/>
<APIBaseField optional />
<InputLayouts.FieldPadder>
@@ -391,15 +398,10 @@ export default function CustomModal({
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
<APIKeyField
optional
subDescription="Paste your API key if your model provider requires authentication."
/>
<InputLayouts.FieldPadder>
<Section gap={0.75}>
<Content
title="Additional Configs"
title="Environment Variables"
description={markdown(
"Add extra properties as needed by the model provider. These are passed to LiteLLM's `completion()` call as [environment variables](https://docs.litellm.ai/docs/set_keys#environment-variables). See [documentation](https://docs.onyx.app/admins/ai_models/custom_inference_provider) for more instructions."
)}

View File

@@ -61,15 +61,15 @@ function OpenAICompatibleModalInternals({
return (
<>
<APIBaseField
subDescription="The base URL of your OpenAI-compatible server."
subDescription={markdown(
"Paste your OpenAI-compatible endpoint URL. [Learn More](https://docs.litellm.ai/docs/providers/openai_compatible)"
)}
placeholder="http://localhost:8000/v1"
/>
<APIKeyField
optional
subDescription={markdown(
"Provide an API key if your server requires authentication."
)}
subDescription="Paste your API key if your model provider requires authentication."
/>
{!isOnboarding && (
@@ -123,6 +123,7 @@ export default function OpenAICompatibleModal({
llmProvider={existingLlmProvider}
onClose={onClose}
initialValues={initialValues}
description="Connect from other cloud or self-hosted models via OpenAI-compatible endpoints."
validationSchema={validationSchema}
onSubmit={async (values, { setSubmitting, setStatus }) => {
await submitProvider({

View File

@@ -1,75 +0,0 @@
import { LLMProviderName, LLMProviderView } from "@/interfaces/llm";
import AnthropicModal from "@/sections/modals/llmConfig/AnthropicModal";
import OpenAIModal from "@/sections/modals/llmConfig/OpenAIModal";
import OllamaModal from "@/sections/modals/llmConfig/OllamaModal";
import AzureModal from "@/sections/modals/llmConfig/AzureModal";
import VertexAIModal from "@/sections/modals/llmConfig/VertexAIModal";
import OpenRouterModal from "@/sections/modals/llmConfig/OpenRouterModal";
import CustomModal from "@/sections/modals/llmConfig/CustomModal";
import BedrockModal from "@/sections/modals/llmConfig/BedrockModal";
import LMStudioModal from "@/sections/modals/llmConfig/LMStudioModal";
import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
import BifrostModal from "@/sections/modals/llmConfig/BifrostModal";
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
export function getModalForExistingProvider(
provider: LLMProviderView,
onOpenChange?: (open: boolean) => void,
defaultModelName?: string
) {
const props = {
existingLlmProvider: provider,
onOpenChange,
defaultModelName,
};
const hasCustomConfig = provider.custom_config != null;
switch (provider.provider) {
// These providers don't use custom_config themselves, so a non-null
// custom_config means the provider was created via CustomModal.
case LLMProviderName.OPENAI:
return hasCustomConfig ? (
<CustomModal {...props} />
) : (
<OpenAIModal {...props} />
);
case LLMProviderName.ANTHROPIC:
return hasCustomConfig ? (
<CustomModal {...props} />
) : (
<AnthropicModal {...props} />
);
case LLMProviderName.AZURE:
return hasCustomConfig ? (
<CustomModal {...props} />
) : (
<AzureModal {...props} />
);
case LLMProviderName.OPENROUTER:
return hasCustomConfig ? (
<CustomModal {...props} />
) : (
<OpenRouterModal {...props} />
);
// These providers legitimately store settings in custom_config,
// so always use their dedicated modals.
case LLMProviderName.OLLAMA_CHAT:
return <OllamaModal {...props} />;
case LLMProviderName.VERTEX_AI:
return <VertexAIModal {...props} />;
case LLMProviderName.BEDROCK:
return <BedrockModal {...props} />;
case LLMProviderName.LM_STUDIO:
return <LMStudioModal {...props} />;
case LLMProviderName.LITELLM_PROXY:
return <LiteLLMProxyModal {...props} />;
case LLMProviderName.BIFROST:
return <BifrostModal {...props} />;
case LLMProviderName.OPENAI_COMPATIBLE:
return <OpenAICompatibleModal {...props} />;
default:
return <CustomModal {...props} />;
}
}

View File

@@ -44,11 +44,7 @@ import useUsers from "@/hooks/useUsers";
import { toast } from "@/hooks/useToast";
import { UserRole } from "@/lib/types";
import Modal from "@/refresh-components/Modal";
import {
getProviderIcon,
getProviderDisplayName,
getProviderProductName,
} from "@/lib/llmConfig/providers";
import { getProvider } from "@/lib/llmConfig";
// ─── DisplayNameField ────────────────────────────────────────────────────────
@@ -640,6 +636,7 @@ export interface ModalWrapperProps<
validationSchema: FormikConfig<T>["validationSchema"];
onSubmit: FormikConfig<T>["onSubmit"];
children: React.ReactNode;
description?: string;
}
export function ModalWrapper<T extends BaseLLMFormValues = BaseLLMFormValues>({
providerName,
@@ -649,6 +646,7 @@ export function ModalWrapper<T extends BaseLLMFormValues = BaseLLMFormValues>({
validationSchema,
onSubmit,
children,
description,
}: ModalWrapperProps<T>) {
return (
<Formik
@@ -663,6 +661,7 @@ export function ModalWrapper<T extends BaseLLMFormValues = BaseLLMFormValues>({
llmProvider={llmProvider}
onClose={onClose}
modelConfigurations={initialValues.model_configurations}
description={description}
>
{children}
</ModalWrapperInner>
@@ -677,6 +676,7 @@ interface ModalWrapperInnerProps {
onClose: () => void;
modelConfigurations?: ModelConfiguration[];
children: React.ReactNode;
description?: string;
}
function ModalWrapperInner({
providerName,
@@ -684,6 +684,7 @@ function ModalWrapperInner({
onClose,
modelConfigurations,
children,
description: descriptionOverride,
}: ModalWrapperInnerProps) {
const { isValid, dirty, isSubmitting, status, setFieldValue, values } =
useFormikContext<BaseLLMFormValues>();
@@ -712,14 +713,18 @@ function ModalWrapperInner({
? "No changes to save."
: undefined;
const providerIcon = getProviderIcon(providerName);
const providerDisplayName = getProviderDisplayName(providerName);
const providerProductName = getProviderProductName(providerName);
const {
icon: providerIcon,
companyName: providerDisplayName,
productName: providerProductName,
} = getProvider(providerName);
const title = llmProvider
? `Configure "${llmProvider.name}"`
: `Set up ${providerProductName}`;
const description = `Connect to ${providerDisplayName} and set up your ${providerProductName} models.`;
const description =
descriptionOverride ??
`Connect to ${providerDisplayName} and set up your ${providerProductName} models.`;
return (
<Modal open onOpenChange={onClose}>

View File

@@ -1,145 +0,0 @@
import React from "react";
import {
WellKnownLLMProviderDescriptor,
LLMProviderName,
LLMProviderFormProps,
} from "@/interfaces/llm";
import { OnboardingActions, OnboardingState } from "@/interfaces/onboarding";
import OpenAIModal from "@/sections/modals/llmConfig/OpenAIModal";
import AnthropicModal from "@/sections/modals/llmConfig/AnthropicModal";
import OllamaModal from "@/sections/modals/llmConfig/OllamaModal";
import AzureModal from "@/sections/modals/llmConfig/AzureModal";
import BedrockModal from "@/sections/modals/llmConfig/BedrockModal";
import VertexAIModal from "@/sections/modals/llmConfig/VertexAIModal";
import OpenRouterModal from "@/sections/modals/llmConfig/OpenRouterModal";
import CustomModal from "@/sections/modals/llmConfig/CustomModal";
import LMStudioModal from "@/sections/modals/llmConfig/LMStudioModal";
import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
// Display info for LLM provider cards - title is the product name, displayName is the company/platform
const PROVIDER_DISPLAY_INFO: Record<
string,
{ title: string; displayName: string }
> = {
[LLMProviderName.OPENAI]: { title: "GPT", displayName: "OpenAI" },
[LLMProviderName.ANTHROPIC]: { title: "Claude", displayName: "Anthropic" },
[LLMProviderName.OLLAMA_CHAT]: { title: "Ollama", displayName: "Ollama" },
[LLMProviderName.AZURE]: {
title: "Azure OpenAI",
displayName: "Microsoft Azure Cloud",
},
[LLMProviderName.BEDROCK]: {
title: "Amazon Bedrock",
displayName: "AWS",
},
[LLMProviderName.VERTEX_AI]: {
title: "Gemini",
displayName: "Google Cloud Vertex AI",
},
[LLMProviderName.OPENROUTER]: {
title: "OpenRouter",
displayName: "OpenRouter",
},
[LLMProviderName.LM_STUDIO]: {
title: "LM Studio",
displayName: "LM Studio",
},
[LLMProviderName.LITELLM_PROXY]: {
title: "LiteLLM Proxy",
displayName: "LiteLLM Proxy",
},
[LLMProviderName.OPENAI_COMPATIBLE]: {
title: "OpenAI Compatible",
displayName: "OpenAI Compatible",
},
};
export function getProviderDisplayInfo(providerName: string): {
title: string;
displayName: string;
} {
return (
PROVIDER_DISPLAY_INFO[providerName] ?? {
title: providerName,
displayName: providerName,
}
);
}
export interface OnboardingFormProps {
llmDescriptor?: WellKnownLLMProviderDescriptor;
isCustomProvider?: boolean;
onboardingState: OnboardingState;
onboardingActions: OnboardingActions;
onOpenChange: (open: boolean) => void;
}
export function getOnboardingForm({
llmDescriptor,
isCustomProvider,
onboardingState,
onboardingActions,
onOpenChange,
}: OnboardingFormProps): React.ReactNode {
const providerName = isCustomProvider
? "custom"
: llmDescriptor?.name ?? "custom";
const sharedProps: LLMProviderFormProps = {
variant: "onboarding" as const,
shouldMarkAsDefault:
(onboardingState?.data.llmProviders ?? []).length === 0,
onboardingActions,
onOpenChange,
onSuccess: () => {
onboardingActions.updateData({
llmProviders: [
...(onboardingState?.data.llmProviders ?? []),
providerName,
],
});
onboardingActions.setButtonActive(true);
},
};
// Handle custom provider
if (isCustomProvider || !llmDescriptor) {
return <CustomModal {...sharedProps} />;
}
switch (llmDescriptor.name) {
case LLMProviderName.OPENAI:
return <OpenAIModal {...sharedProps} />;
case LLMProviderName.ANTHROPIC:
return <AnthropicModal {...sharedProps} />;
case LLMProviderName.OLLAMA_CHAT:
return <OllamaModal {...sharedProps} />;
case LLMProviderName.AZURE:
return <AzureModal {...sharedProps} />;
case LLMProviderName.BEDROCK:
return <BedrockModal {...sharedProps} />;
case LLMProviderName.VERTEX_AI:
return <VertexAIModal {...sharedProps} />;
case LLMProviderName.OPENROUTER:
return <OpenRouterModal {...sharedProps} />;
case LLMProviderName.LM_STUDIO:
return <LMStudioModal {...sharedProps} />;
case LLMProviderName.LITELLM_PROXY:
return <LiteLLMProxyModal {...sharedProps} />;
case LLMProviderName.OPENAI_COMPATIBLE:
return <OpenAICompatibleModal {...sharedProps} />;
default:
return <CustomModal {...sharedProps} />;
}
}

View File

@@ -4,35 +4,29 @@ import { memo, useState, useCallback } from "react";
import Text from "@/refresh-components/texts/Text";
import { Button } from "@opal/components";
import Separator from "@/refresh-components/Separator";
import LLMProviderCard from "../components/LLMProviderCard";
import LLMProviderCard from "@/sections/onboarding/components/LLMProviderCard";
import {
OnboardingActions,
OnboardingState,
OnboardingStep,
} from "@/interfaces/onboarding";
import { WellKnownLLMProviderDescriptor } from "@/interfaces/llm";
import {
getOnboardingForm,
getProviderDisplayInfo,
} from "../forms/getOnboardingForm";
LLMProviderFormProps,
WellKnownLLMProviderDescriptor,
} from "@/interfaces/llm";
import { getProvider } from "@/lib/llmConfig";
import { Disabled } from "@opal/core";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import { SvgCheckCircle, SvgCpu, SvgExternalLink } from "@opal/icons";
import { ContentAction } from "@opal/layouts";
import { useLLMProviderOptions } from "@/lib/hooks/useLLMProviderOptions";
type LLMStepProps = {
state: OnboardingState;
actions: OnboardingActions;
disabled?: boolean;
};
interface SelectedProvider {
llmDescriptor?: WellKnownLLMProviderDescriptor;
isCustomProvider: boolean;
}
const LLMProviderSkeleton = () => {
function LLMProviderSkeleton() {
return (
<div className="flex justify-between h-full w-full p-1 rounded-12 border border-border-01 bg-background-neutral-01 animate-pulse">
<div className="flex gap-1 p-1 flex-1 min-w-0">
@@ -47,12 +41,11 @@ const LLMProviderSkeleton = () => {
<div className="h-6 w-16 bg-neutral-200 rounded" />
</div>
);
};
}
type StackedProviderIconsProps = {
interface StackedProviderIconsProps {
providers: string[];
};
}
const StackedProviderIcons = ({ providers }: StackedProviderIconsProps) => {
if (!providers || providers.length === 0) {
return null;
@@ -89,133 +82,157 @@ const StackedProviderIcons = ({ providers }: StackedProviderIconsProps) => {
);
};
const LLMStepInner = ({
state: onboardingState,
actions: onboardingActions,
disabled,
}: LLMStepProps) => {
const { llmProviderOptions, isLoading } = useLLMProviderOptions();
const llmDescriptors = llmProviderOptions ?? [];
interface LLMStepProps {
state: OnboardingState;
actions: OnboardingActions;
disabled?: boolean;
}
const LLMStep = memo(
({
state: onboardingState,
actions: onboardingActions,
disabled,
}: LLMStepProps) => {
const { llmProviderOptions, isLoading } = useLLMProviderOptions();
const llmDescriptors = llmProviderOptions ?? [];
const [selectedProvider, setSelectedProvider] =
useState<SelectedProvider | null>(null);
const [isModalOpen, setIsModalOpen] = useState(false);
const [selectedProvider, setSelectedProvider] =
useState<SelectedProvider | null>(null);
const [isModalOpen, setIsModalOpen] = useState(false);
const handleProviderClick = useCallback(
(
llmDescriptor?: WellKnownLLMProviderDescriptor,
isCustomProvider: boolean = false
) => {
setSelectedProvider({ llmDescriptor, isCustomProvider });
setIsModalOpen(true);
},
[]
);
const handleProviderClick = useCallback(
(
llmDescriptor?: WellKnownLLMProviderDescriptor,
isCustomProvider: boolean = false
) => {
setSelectedProvider({ llmDescriptor, isCustomProvider });
setIsModalOpen(true);
},
[]
);
const handleModalClose = useCallback((open: boolean) => {
setIsModalOpen(open);
if (!open) {
setSelectedProvider(null);
}
}, []);
const handleModalClose = useCallback((open: boolean) => {
setIsModalOpen(open);
if (!open) {
setSelectedProvider(null);
}
}, []);
if (
onboardingState.currentStep === OnboardingStep.LlmSetup ||
onboardingState.currentStep === OnboardingStep.Name
) {
return (
<Disabled disabled={disabled} allowClick>
<div
className="flex flex-col items-center justify-between w-full p-1 rounded-16 border border-border-01 bg-background-tint-00"
aria-label="onboarding-llm-step"
>
<ContentAction
icon={SvgCpu}
title="Connect your LLM models"
description="Onyx supports both self-hosted models and popular providers."
sizePreset="main-ui"
variant="section"
paddingVariant="lg"
rightChildren={
<Button
disabled={disabled}
prominence="tertiary"
rightIcon={SvgExternalLink}
href="/admin/configuration/llm"
>
View in Admin Panel
</Button>
}
/>
<Separator />
<div className="flex flex-wrap gap-1 [&>*:last-child:nth-child(odd)]:basis-full">
{isLoading ? (
Array.from({ length: 8 }).map((_, idx) => (
<div
key={idx}
className="basis-[calc(50%-theme(spacing.1)/2)] grow"
if (
onboardingState.currentStep === OnboardingStep.LlmSetup ||
onboardingState.currentStep === OnboardingStep.Name
) {
const providerName = selectedProvider?.isCustomProvider
? "custom"
: selectedProvider?.llmDescriptor?.name ?? "custom";
const { Modal: ModalComponent } = getProvider(providerName);
const modalProps: LLMProviderFormProps = {
variant: "onboarding" as const,
shouldMarkAsDefault:
(onboardingState?.data.llmProviders ?? []).length === 0,
onboardingActions,
onOpenChange: handleModalClose,
onSuccess: () => {
onboardingActions.updateData({
llmProviders: [
...(onboardingState?.data.llmProviders ?? []),
providerName,
],
});
onboardingActions.setButtonActive(true);
},
};
return (
<Disabled disabled={disabled} allowClick>
<div
className="flex flex-col items-center justify-between w-full p-1 rounded-16 border border-border-01 bg-background-tint-00"
aria-label="onboarding-llm-step"
>
<ContentAction
icon={SvgCpu}
title="Connect your LLM models"
description="Onyx supports both self-hosted models and popular providers."
sizePreset="main-ui"
variant="section"
paddingVariant="lg"
rightChildren={
<Button
disabled={disabled}
prominence="tertiary"
rightIcon={SvgExternalLink}
href="/admin/configuration/llm"
>
<LLMProviderSkeleton />
</div>
))
) : (
<>
{/* Render the selected provider form */}
{selectedProvider &&
isModalOpen &&
getOnboardingForm({
llmDescriptor: selectedProvider.llmDescriptor,
isCustomProvider: selectedProvider.isCustomProvider,
onboardingState,
onboardingActions,
onOpenChange: handleModalClose,
View in Admin Panel
</Button>
}
/>
<Separator />
<div className="flex flex-wrap gap-1 [&>*:last-child:nth-child(odd)]:basis-full">
{isLoading ? (
Array.from({ length: 8 }).map((_, idx) => (
<div
key={idx}
className="basis-[calc(50%-theme(spacing.1)/2)] grow"
>
<LLMProviderSkeleton />
</div>
))
) : (
<>
{/* Render the selected provider form */}
{selectedProvider && isModalOpen && (
<ModalComponent {...modalProps} />
)}
{/* Render provider cards */}
{llmDescriptors.map((llmDescriptor) => {
const { productName, companyName } = getProvider(
llmDescriptor.name
);
return (
<div
key={llmDescriptor.name}
className="basis-[calc(50%-theme(spacing.1)/2)] grow"
>
<LLMProviderCard
title={productName}
subtitle={companyName}
providerName={llmDescriptor.name}
disabled={disabled}
isConnected={onboardingState.data.llmProviders?.some(
(provider) => provider === llmDescriptor.name
)}
onClick={() =>
handleProviderClick(llmDescriptor, false)
}
/>
</div>
);
})}
{/* Render provider cards */}
{llmDescriptors.map((llmDescriptor) => {
const displayInfo = getProviderDisplayInfo(
llmDescriptor.name
);
return (
<div
key={llmDescriptor.name}
className="basis-[calc(50%-theme(spacing.1)/2)] grow"
>
<LLMProviderCard
title={displayInfo.title}
subtitle={displayInfo.displayName}
providerName={llmDescriptor.name}
disabled={disabled}
isConnected={onboardingState.data.llmProviders?.some(
(provider) => provider === llmDescriptor.name
)}
onClick={() =>
handleProviderClick(llmDescriptor, false)
}
/>
</div>
);
})}
{/* Custom provider card */}
<div className="basis-[calc(50%-theme(spacing.1)/2)] grow">
<LLMProviderCard
title="Custom LLM Provider"
subtitle="LiteLLM Compatible APIs"
disabled={disabled}
isConnected={onboardingState.data.llmProviders?.some(
(provider) => provider === "custom"
)}
onClick={() => handleProviderClick(undefined, true)}
/>
</div>
</>
)}
{/* Custom provider card */}
<div className="basis-[calc(50%-theme(spacing.1)/2)] grow">
<LLMProviderCard
title="Custom LLM Provider"
subtitle="LiteLLM Compatible APIs"
disabled={disabled}
isConnected={onboardingState.data.llmProviders?.some(
(provider) => provider === "custom"
)}
onClick={() => handleProviderClick(undefined, true)}
/>
</div>
</>
)}
</div>
</div>
</div>
</Disabled>
);
} else {
</Disabled>
);
}
return (
<button
type="button"
@@ -244,7 +261,7 @@ const LLMStepInner = ({
</button>
);
}
};
);
LLMStep.displayName = "LLMStep";
const LLMStep = memo(LLMStepInner);
export default LLMStep;