Compare commits

..

7 Commits

72 changed files with 4877 additions and 2876 deletions

View File

@@ -996,3 +996,72 @@ def set_group_permission__no_commit(
db_session.flush()
recompute_permissions_for_group__no_commit(group_id, db_session)
def set_group_permissions_bulk__no_commit(
    group_id: int,
    desired_permissions: set[Permission],
    granted_by: UUID,
    db_session: Session,
) -> list[Permission]:
    """Set the full desired permission state for a group in one pass.

    Enables every permission in ``desired_permissions`` (reviving soft-deleted
    grants or inserting new USER-sourced ones) and soft-deletes any existing
    grant whose permission is absent from the set.  Existing grant rows are
    locked with ``FOR UPDATE`` so concurrent bulk writers for the same group
    serialize.  Calls recompute once at the end.  Does NOT commit.

    NOTE(review): the disable loop below does not itself skip non-toggleable
    permissions — if the group already holds a grant for a non-toggleable
    permission that is missing from ``desired_permissions``, it WILL be
    soft-deleted here.  The HTTP endpoint rejects non-toggleable values in the
    request, but confirm no other code path attaches non-toggleable grants to
    groups before relying on that filtering alone.

    Returns the resulting list of enabled permissions (re-read from the DB
    after the flush; not filtered for toggleability).
    """
    # Lock all existing grant rows for this group (SELECT ... FOR UPDATE)
    # so two concurrent bulk updates cannot interleave enable/disable writes.
    existing_grants = (
        db_session.execute(
            select(PermissionGrant)
            .where(PermissionGrant.group_id == group_id)
            .with_for_update()
        )
        .scalars()
        .all()
    )
    # Index by permission for O(1) lookup in both passes below.
    grant_map: dict[Permission, PermissionGrant] = {
        g.permission: g for g in existing_grants
    }
    # Enable desired permissions
    for perm in desired_permissions:
        existing = grant_map.get(perm)
        if existing is not None:
            if existing.is_deleted:
                # Revive the soft-deleted grant and refresh its audit fields.
                existing.is_deleted = False
                existing.granted_by = granted_by
                existing.granted_at = func.now()
        else:
            # No row at all for this permission — insert a fresh USER grant.
            db_session.add(
                PermissionGrant(
                    group_id=group_id,
                    permission=perm,
                    grant_source=GrantSource.USER,
                    granted_by=granted_by,
                )
            )
    # Disable toggleable permissions not in the desired set
    # (see NOTE(review) in the docstring: no non-toggleable filter here).
    for perm, grant in grant_map.items():
        if perm not in desired_permissions and not grant.is_deleted:
            grant.is_deleted = True
    db_session.flush()
    recompute_permissions_for_group__no_commit(group_id, db_session)
    # Return the resulting enabled set
    return [
        g.permission
        for g in db_session.execute(
            select(PermissionGrant).where(
                PermissionGrant.group_id == group_id,
                PermissionGrant.is_deleted.is_(False),
            )
        )
        .scalars()
        .all()
    ]

View File

@@ -13,20 +13,21 @@ from ee.onyx.db.user_group import fetch_user_groups_for_user
from ee.onyx.db.user_group import insert_user_group
from ee.onyx.db.user_group import prepare_user_group_for_deletion
from ee.onyx.db.user_group import rename_user_group
from ee.onyx.db.user_group import set_group_permission__no_commit
from ee.onyx.db.user_group import set_group_permissions_bulk__no_commit
from ee.onyx.db.user_group import update_user_curator_relationship
from ee.onyx.db.user_group import update_user_group
from ee.onyx.server.user_group.models import AddUsersToUserGroupRequest
from ee.onyx.server.user_group.models import BulkSetPermissionsRequest
from ee.onyx.server.user_group.models import MinimalUserGroupSnapshot
from ee.onyx.server.user_group.models import SetCuratorRequest
from ee.onyx.server.user_group.models import SetPermissionRequest
from ee.onyx.server.user_group.models import SetPermissionResponse
from ee.onyx.server.user_group.models import UpdateGroupAgentsRequest
from ee.onyx.server.user_group.models import UserGroup
from ee.onyx.server.user_group.models import UserGroupCreate
from ee.onyx.server.user_group.models import UserGroupRename
from ee.onyx.server.user_group.models import UserGroupUpdate
from onyx.auth.permissions import NON_TOGGLEABLE_PERMISSIONS
from onyx.auth.permissions import PERMISSION_REGISTRY
from onyx.auth.permissions import PermissionRegistryEntry
from onyx.auth.permissions import require_permission
from onyx.auth.users import current_curator_or_admin_user
from onyx.configs.app_configs import DISABLE_VECTOR_DB
@@ -48,24 +49,15 @@ router = APIRouter(prefix="/manage", tags=PUBLIC_API_TAGS)
@router.get("/admin/user-group")
def list_user_groups(
include_default: bool = False,
user: User = Depends(current_curator_or_admin_user),
_: User = Depends(require_permission(Permission.READ_USER_GROUPS)),
db_session: Session = Depends(get_session),
) -> list[UserGroup]:
if user.role == UserRole.ADMIN:
user_groups = fetch_user_groups(
db_session,
only_up_to_date=False,
eager_load_for_snapshot=True,
include_default=include_default,
)
else:
user_groups = fetch_user_groups_for_user(
db_session=db_session,
user_id=user.id,
only_curator_groups=user.role == UserRole.CURATOR,
eager_load_for_snapshot=True,
include_default=include_default,
)
user_groups = fetch_user_groups(
db_session,
only_up_to_date=False,
eager_load_for_snapshot=True,
include_default=include_default,
)
return [UserGroup.from_model(user_group) for user_group in user_groups]
@@ -92,6 +84,13 @@ def list_minimal_user_groups(
]
@router.get("/admin/permissions/registry")
def get_permission_registry(
    _: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
) -> list[PermissionRegistryEntry]:
    """Serve the static registry of toggleable permissions for the admin UI.

    Requires full admin-panel access; the registry itself is a module-level
    constant, so this endpoint performs no database work.
    """
    registry = PERMISSION_REGISTRY
    return registry
@router.get("/admin/user-group/{user_group_id}/permissions")
def get_user_group_permissions(
user_group_id: int,
@@ -102,37 +101,39 @@ def get_user_group_permissions(
if group is None:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "User group not found")
return [
grant.permission for grant in group.permission_grants if not grant.is_deleted
grant.permission
for grant in group.permission_grants
if not grant.is_deleted and grant.permission not in NON_TOGGLEABLE_PERMISSIONS
]
@router.put("/admin/user-group/{user_group_id}/permissions")
def set_user_group_permission(
def set_user_group_permissions(
user_group_id: int,
request: SetPermissionRequest,
request: BulkSetPermissionsRequest,
user: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
db_session: Session = Depends(get_session),
) -> SetPermissionResponse:
) -> list[Permission]:
group = fetch_user_group(db_session, user_group_id)
if group is None:
raise OnyxError(OnyxErrorCode.NOT_FOUND, "User group not found")
if request.permission in NON_TOGGLEABLE_PERMISSIONS:
non_toggleable = [p for p in request.permissions if p in NON_TOGGLEABLE_PERMISSIONS]
if non_toggleable:
raise OnyxError(
OnyxErrorCode.INVALID_INPUT,
f"Permission '{request.permission}' cannot be toggled via this endpoint",
f"Permissions {non_toggleable} cannot be toggled via this endpoint",
)
set_group_permission__no_commit(
result = set_group_permissions_bulk__no_commit(
group_id=user_group_id,
permission=request.permission,
enabled=request.enabled,
desired_permissions=set(request.permissions),
granted_by=user.id,
db_session=db_session,
)
db_session.commit()
return SetPermissionResponse(permission=request.permission, enabled=request.enabled)
return result
@router.post("/admin/user-group")

View File

@@ -132,3 +132,7 @@ class SetPermissionRequest(BaseModel):
class SetPermissionResponse(BaseModel):
permission: Permission
enabled: bool
class BulkSetPermissionsRequest(BaseModel):
    """Full desired toggleable-permission state for a user group.

    Sent to PUT /admin/user-group/{id}/permissions; the server replaces the
    group's previous grants with exactly this list, so any permission omitted
    here is disabled.
    """

    # Complete desired set; order and duplicates are irrelevant — the
    # endpoint converts this list to a set before applying it.
    permissions: list[Permission]

View File

@@ -11,6 +11,8 @@ from collections.abc import Coroutine
from typing import Any
from fastapi import Depends
from pydantic import BaseModel
from pydantic import field_validator
from onyx.auth.users import current_user
from onyx.db.enums import Permission
@@ -34,9 +36,7 @@ IMPLIED_PERMISSIONS: dict[str, set[str]] = {
Permission.READ_DOCUMENT_SETS.value,
Permission.READ_CONNECTORS.value,
},
Permission.ADD_CONNECTORS.value: {Permission.READ_CONNECTORS.value},
Permission.MANAGE_CONNECTORS.value: {
Permission.ADD_CONNECTORS.value,
Permission.READ_CONNECTORS.value,
},
Permission.MANAGE_USER_GROUPS.value: {
@@ -44,6 +44,11 @@ IMPLIED_PERMISSIONS: dict[str, set[str]] = {
Permission.READ_DOCUMENT_SETS.value,
Permission.READ_AGENTS.value,
Permission.READ_USERS.value,
Permission.READ_USER_GROUPS.value,
},
Permission.MANAGE_LLMS.value: {
Permission.READ_USER_GROUPS.value,
Permission.READ_AGENTS.value,
},
}
@@ -58,10 +63,129 @@ NON_TOGGLEABLE_PERMISSIONS: frozenset[Permission] = frozenset(
Permission.READ_DOCUMENT_SETS,
Permission.READ_AGENTS,
Permission.READ_USERS,
Permission.READ_USER_GROUPS,
}
)
class PermissionRegistryEntry(BaseModel):
    """One UI-facing permission row for GET /admin/permissions/registry.

    Validation rejects any entry containing a non-toggleable permission
    (BASIC_ACCESS, FULL_ADMIN_PANEL_ACCESS, READ_*), so such permissions can
    never be exposed through the registry.
    """

    id: str
    display_name: str
    description: str
    permissions: list[Permission]
    group: int

    @field_validator("permissions")
    @classmethod
    def must_be_toggleable(cls, v: list[Permission]) -> list[Permission]:
        # Find the first non-toggleable permission, if any, and reject it.
        offending = next(
            (perm for perm in v if perm in NON_TOGGLEABLE_PERMISSIONS), None
        )
        if offending is not None:
            raise ValueError(
                f"Permission '{offending.value}' is not toggleable and "
                "cannot be included in the permission registry"
            )
        return v
# Registry of toggleable permissions exposed to the admin UI.
# Single source of truth for display names, descriptions, grouping,
# and which backend tokens each UI row controls.
# The frontend fetches this via GET /admin/permissions/registry
# and only adds icon mapping locally.
# Registry of toggleable permissions exposed to the admin UI.
# Single source of truth for display names, descriptions, grouping,
# and which backend tokens each UI row controls.
# The frontend fetches this via GET /admin/permissions/registry
# and only adds icon mapping locally.
# NOTE: constructing each entry runs PermissionRegistryEntry's validator,
# so a non-toggleable permission added here fails at import time.
PERMISSION_REGISTRY: list[PermissionRegistryEntry] = [
    # Group 0 — System Configuration
    PermissionRegistryEntry(
        id="manage_llms",
        display_name="Manage LLMs",
        description="Add and update configurations for language models (LLMs).",
        permissions=[Permission.MANAGE_LLMS],
        group=0,
    ),
    PermissionRegistryEntry(
        id="manage_connectors_and_document_sets",
        display_name="Manage Connectors & Document Sets",
        description="Add and update connectors and document sets.",
        # One UI row controls two backend tokens.
        permissions=[
            Permission.MANAGE_CONNECTORS,
            Permission.MANAGE_DOCUMENT_SETS,
        ],
        group=0,
    ),
    PermissionRegistryEntry(
        id="manage_actions",
        display_name="Manage Actions",
        description="Add and update custom tools and MCP/OpenAPI actions.",
        permissions=[Permission.MANAGE_ACTIONS],
        group=0,
    ),
    # Group 1 — User & Access Management
    PermissionRegistryEntry(
        id="manage_groups",
        display_name="Manage Groups",
        description="Add and update user groups.",
        permissions=[Permission.MANAGE_USER_GROUPS],
        group=1,
    ),
    PermissionRegistryEntry(
        id="manage_service_accounts",
        display_name="Manage Service Accounts",
        description="Add and update service accounts and their API keys.",
        permissions=[Permission.CREATE_SERVICE_ACCOUNT_API_KEYS],
        group=1,
    ),
    PermissionRegistryEntry(
        id="manage_slack_discord_bots",
        display_name="Manage Slack/Discord Bots",
        description="Add and update Onyx integrations with Slack or Discord.",
        permissions=[Permission.CREATE_SLACK_DISCORD_BOTS],
        group=1,
    ),
    # Group 2 — Agents
    PermissionRegistryEntry(
        id="create_agents",
        display_name="Create Agents",
        description="Create and edit the user's own agents.",
        permissions=[Permission.ADD_AGENTS],
        group=2,
    ),
    PermissionRegistryEntry(
        id="manage_agents",
        display_name="Manage Agents",
        description="View and update all public and shared agents in the organization.",
        permissions=[Permission.MANAGE_AGENTS],
        group=2,
    ),
    # Group 3 — Monitoring & Tokens
    PermissionRegistryEntry(
        id="view_agent_analytics",
        display_name="View Agent Analytics",
        description="View analytics for agents the group can manage.",
        permissions=[Permission.READ_AGENT_ANALYTICS],
        group=3,
    ),
    PermissionRegistryEntry(
        id="view_query_history",
        display_name="View Query History",
        description="View query history of everyone in the organization.",
        permissions=[Permission.READ_QUERY_HISTORY],
        group=3,
    ),
    PermissionRegistryEntry(
        id="create_user_access_token",
        display_name="Create User Access Token",
        description="Add and update the user's personal access tokens.",
        permissions=[Permission.CREATE_USER_API_KEYS],
        group=3,
    ),
]
def resolve_effective_permissions(granted: set[str]) -> set[str]:
"""Expand granted permissions with their implied permissions.
@@ -83,7 +207,12 @@ def resolve_effective_permissions(granted: set[str]) -> set[str]:
def get_effective_permissions(user: User) -> set[Permission]:
"""Read granted permissions from the column and expand implied permissions."""
"""Read granted permissions from the column and expand implied permissions.
Admin-role users always receive all permissions regardless of the JSONB
column, maintaining backward compatibility with role-based access control.
"""
granted: set[Permission] = set()
for p in user.effective_permissions:
try:
@@ -96,6 +225,11 @@ def get_effective_permissions(user: User) -> set[Permission]:
return {Permission(p) for p in expanded}
def has_permission(user: User, permission: Permission) -> bool:
    """Return True when *user* effectively holds *permission*.

    The effective set comes from ``get_effective_permissions``, so direct
    grants, implied expansions, and the admin override are all honored.
    """
    effective = get_effective_permissions(user)
    return permission in effective
def require_permission(
required: Permission,
) -> Callable[..., Coroutine[Any, Any, User]]:

View File

@@ -366,12 +366,12 @@ class Permission(str, PyEnum):
READ_DOCUMENT_SETS = "read:document_sets"
READ_AGENTS = "read:agents"
READ_USERS = "read:users"
READ_USER_GROUPS = "read:user_groups"
# Add / Manage pairs
ADD_AGENTS = "add:agents"
MANAGE_AGENTS = "manage:agents"
MANAGE_DOCUMENT_SETS = "manage:document_sets"
ADD_CONNECTORS = "add:connectors"
MANAGE_CONNECTORS = "manage:connectors"
MANAGE_LLMS = "manage:llms"

View File

@@ -16,12 +16,14 @@ from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from onyx.access.hierarchy_access import get_user_external_group_ids
from onyx.auth.permissions import has_permission
from onyx.auth.schemas import UserRole
from onyx.configs.app_configs import CURATORS_CANNOT_VIEW_OR_EDIT_NON_OWNED_ASSISTANTS
from onyx.configs.constants import DEFAULT_PERSONA_ID
from onyx.configs.constants import NotificationType
from onyx.db.constants import SLACK_BOT_PERSONA_PREFIX
from onyx.db.document_access import get_accessible_documents_by_ids
from onyx.db.enums import Permission
from onyx.db.models import ConnectorCredentialPair
from onyx.db.models import Document
from onyx.db.models import DocumentSet
@@ -74,7 +76,7 @@ class PersonaLoadType(Enum):
def _add_user_filters(
stmt: Select[tuple[Persona]], user: User, get_editable: bool = True
) -> Select[tuple[Persona]]:
if user.role == UserRole.ADMIN:
if user.role == UserRole.ADMIN or has_permission(user, Permission.READ_AGENTS):
return stmt
stmt = stmt.distinct()

View File

@@ -90,7 +90,6 @@ from onyx.onyxbot.slack.utils import respond_in_thread_or_channel
from onyx.onyxbot.slack.utils import TenantSocketModeClient
from onyx.redis.redis_pool import get_redis_client
from onyx.server.manage.models import SlackBotTokens
from onyx.tracing.setup import setup_tracing
from onyx.utils.logger import setup_logger
from onyx.utils.variable_functionality import fetch_ee_implementation_or_noop
from onyx.utils.variable_functionality import set_is_ee_based_on_env_variable
@@ -1207,7 +1206,6 @@ if __name__ == "__main__":
tenant_handler = SlackbotHandler()
set_is_ee_based_on_env_variable()
setup_tracing()
try:
# Keep the main thread alive

View File

@@ -15,8 +15,8 @@ from fastapi import Query
from pydantic import ValidationError
from sqlalchemy.orm import Session
from onyx.auth.permissions import has_permission
from onyx.auth.permissions import require_permission
from onyx.auth.schemas import UserRole
from onyx.auth.users import current_chat_accessible_user
from onyx.db.engine.sql_engine import get_session
from onyx.db.enums import LLMModelFlowType
@@ -252,7 +252,7 @@ def _validate_llm_provider_change(
@admin_router.get("/built-in/options")
def fetch_llm_options(
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
) -> list[WellKnownLLMProviderDescriptor]:
return fetch_available_well_known_llms()
@@ -260,7 +260,7 @@ def fetch_llm_options(
@admin_router.get("/built-in/options/{provider_name}")
def fetch_llm_provider_options(
provider_name: str,
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
) -> WellKnownLLMProviderDescriptor:
well_known_llms = fetch_available_well_known_llms()
for well_known_llm in well_known_llms:
@@ -272,7 +272,7 @@ def fetch_llm_provider_options(
@admin_router.post("/test")
def test_llm_configuration(
test_llm_request: TestLLMRequest,
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> None:
"""Test LLM configuration settings"""
@@ -330,7 +330,7 @@ def test_llm_configuration(
@admin_router.post("/test/default")
def test_default_provider(
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
) -> None:
try:
llm = get_default_llm()
@@ -346,7 +346,7 @@ def test_default_provider(
@admin_router.get("/provider")
def list_llm_providers(
include_image_gen: bool = Query(False),
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> LLMProviderResponse[LLMProviderView]:
start_time = datetime.now(timezone.utc)
@@ -391,7 +391,7 @@ def put_llm_provider(
False,
description="True if creating a new one, False if updating an existing provider",
),
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> LLMProviderView:
# validate request (e.g. if we're intending to create but the name already exists we should throw an error)
@@ -529,7 +529,7 @@ def put_llm_provider(
def delete_llm_provider(
provider_id: int,
force: bool = Query(False),
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> None:
if not force:
@@ -550,7 +550,7 @@ def delete_llm_provider(
@admin_router.post("/default")
def set_provider_as_default(
default_model_request: DefaultModel,
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> None:
update_default_provider(
@@ -563,7 +563,7 @@ def set_provider_as_default(
@admin_router.post("/default-vision")
def set_provider_as_default_vision(
default_model: DefaultModel,
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> None:
update_default_vision_provider(
@@ -575,7 +575,7 @@ def set_provider_as_default_vision(
@admin_router.get("/auto-config")
def get_auto_config(
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
) -> dict:
"""Get the current Auto mode configuration from GitHub.
@@ -593,7 +593,7 @@ def get_auto_config(
@admin_router.get("/vision-providers")
def get_vision_capable_providers(
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> LLMProviderResponse[VisionProviderResponse]:
"""Return a list of LLM providers and their models that support image input"""
@@ -655,7 +655,7 @@ def list_llm_provider_basics(
all_providers = fetch_existing_llm_providers(db_session, [])
user_group_ids = fetch_user_group_ids(db_session, user)
is_admin = user.role == UserRole.ADMIN
can_manage_llms = has_permission(user, Permission.MANAGE_LLMS)
accessible_providers = []
@@ -667,7 +667,7 @@ def list_llm_provider_basics(
# - Excludes providers with persona restrictions (requires specific persona)
# - Excludes non-public providers with no restrictions (admin-only)
if can_user_access_llm_provider(
provider, user_group_ids, persona=None, is_admin=is_admin
provider, user_group_ids, persona=None, is_admin=can_manage_llms
):
accessible_providers.append(LLMProviderDescriptor.from_model(provider))
@@ -703,17 +703,19 @@ def get_valid_model_names_for_persona(
if not persona:
return []
is_admin = user.role == UserRole.ADMIN
can_manage_llms = has_permission(user, Permission.MANAGE_LLMS)
all_providers = fetch_existing_llm_providers(
db_session, [LLMModelFlowType.CHAT, LLMModelFlowType.VISION]
)
user_group_ids = set() if is_admin else fetch_user_group_ids(db_session, user)
user_group_ids = (
set() if can_manage_llms else fetch_user_group_ids(db_session, user)
)
valid_models = []
for llm_provider_model in all_providers:
# Check access with persona context — respects all RBAC restrictions
if can_user_access_llm_provider(
llm_provider_model, user_group_ids, persona, is_admin=is_admin
llm_provider_model, user_group_ids, persona, is_admin=can_manage_llms
):
# Collect all model names from this provider
for model_config in llm_provider_model.model_configurations:
@@ -752,18 +754,20 @@ def list_llm_providers_for_persona(
"You don't have access to this assistant",
)
is_admin = user.role == UserRole.ADMIN
can_manage_llms = has_permission(user, Permission.MANAGE_LLMS)
all_providers = fetch_existing_llm_providers(
db_session, [LLMModelFlowType.CHAT, LLMModelFlowType.VISION]
)
user_group_ids = set() if is_admin else fetch_user_group_ids(db_session, user)
user_group_ids = (
set() if can_manage_llms else fetch_user_group_ids(db_session, user)
)
llm_provider_list: list[LLMProviderDescriptor] = []
for llm_provider_model in all_providers:
# Check access with persona context — respects persona restrictions
if can_user_access_llm_provider(
llm_provider_model, user_group_ids, persona, is_admin=is_admin
llm_provider_model, user_group_ids, persona, is_admin=can_manage_llms
):
llm_provider_list.append(
LLMProviderDescriptor.from_model(llm_provider_model)
@@ -791,7 +795,7 @@ def list_llm_providers_for_persona(
if persona_default_provider:
provider = fetch_existing_llm_provider(persona_default_provider, db_session)
if provider and can_user_access_llm_provider(
provider, user_group_ids, persona, is_admin=is_admin
provider, user_group_ids, persona, is_admin=can_manage_llms
):
if persona_default_model:
# Persona specifies both provider and model — use them directly
@@ -824,7 +828,7 @@ def list_llm_providers_for_persona(
@admin_router.get("/provider-contextual-cost")
def get_provider_contextual_cost(
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> list[LLMCost]:
"""
@@ -873,7 +877,7 @@ def get_provider_contextual_cost(
@admin_router.post("/bedrock/available-models")
def get_bedrock_available_models(
request: BedrockModelsRequest,
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> list[BedrockFinalModelResponse]:
"""Fetch available Bedrock models for a specific region and credentials.
@@ -1048,7 +1052,7 @@ def _get_ollama_available_model_names(api_base: str) -> set[str]:
@admin_router.post("/ollama/available-models")
def get_ollama_available_models(
request: OllamaModelsRequest,
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> list[OllamaFinalModelResponse]:
"""Fetch the list of available models from an Ollama server."""
@@ -1172,7 +1176,7 @@ def _get_openrouter_models_response(api_base: str, api_key: str) -> dict:
@admin_router.post("/openrouter/available-models")
def get_openrouter_available_models(
request: OpenRouterModelsRequest,
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> list[OpenRouterFinalModelResponse]:
"""Fetch available models from OpenRouter `/models` endpoint.
@@ -1253,7 +1257,7 @@ def get_openrouter_available_models(
@admin_router.post("/lm-studio/available-models")
def get_lm_studio_available_models(
request: LMStudioModelsRequest,
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> list[LMStudioFinalModelResponse]:
"""Fetch available models from an LM Studio server.
@@ -1360,7 +1364,7 @@ def get_lm_studio_available_models(
@admin_router.post("/litellm/available-models")
def get_litellm_available_models(
request: LitellmModelsRequest,
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> list[LitellmFinalModelResponse]:
"""Fetch available models from Litellm proxy /v1/models endpoint."""
@@ -1493,7 +1497,7 @@ def _get_openai_compatible_models_response(
@admin_router.post("/bifrost/available-models")
def get_bifrost_available_models(
request: BifrostModelsRequest,
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> list[BifrostFinalModelResponse]:
"""Fetch available models from Bifrost gateway /v1/models endpoint."""
@@ -1583,7 +1587,7 @@ def _get_bifrost_models_response(api_base: str, api_key: str | None = None) -> d
@admin_router.post("/openai-compatible/available-models")
def get_openai_compatible_server_available_models(
request: OpenAICompatibleModelsRequest,
_: User = Depends(require_permission(Permission.FULL_ADMIN_PANEL_ACCESS)),
_: User = Depends(require_permission(Permission.MANAGE_LLMS)),
db_session: Session = Depends(get_session),
) -> list[OpenAICompatibleFinalModelResponse]:
"""Fetch available models from a generic OpenAI-compatible /v1/models endpoint."""

View File

@@ -135,6 +135,7 @@ class UserInfo(BaseModel):
is_anonymous_user: bool | None = None
password_configured: bool | None = None
tenant_info: TenantInfo | None = None
effective_permissions: list[str] = Field(default_factory=list)
@classmethod
def from_model(
@@ -148,6 +149,7 @@ class UserInfo(BaseModel):
tenant_info: TenantInfo | None = None,
assistant_specific_configs: UserSpecificAssistantPreferences | None = None,
memories: list[MemoryItem] | None = None,
effective_permissions: list[str] | None = None,
) -> "UserInfo":
return cls(
id=str(user.id),
@@ -187,6 +189,7 @@ class UserInfo(BaseModel):
is_cloud_superuser=is_cloud_superuser,
is_anonymous_user=is_anonymous_user,
tenant_info=tenant_info,
effective_permissions=effective_permissions or [],
personalization=UserPersonalization(
name=user.personal_name or "",
role=user.personal_role or "",

View File

@@ -857,6 +857,7 @@ def verify_user_logged_in(
invitation=tenant_invitation,
),
memories=memories,
effective_permissions=sorted(p.value for p in get_effective_permissions(user)),
)
return user_info

View File

@@ -117,15 +117,14 @@ class UserGroupManager:
return response.json()
@staticmethod
def set_permission(
def set_permissions(
user_group: DATestUserGroup,
permission: str,
enabled: bool,
permissions: list[str],
user_performing_action: DATestUser,
) -> requests.Response:
response = requests.put(
f"{API_SERVER_URL}/manage/admin/user-group/{user_group.id}/permissions",
json={"permission": permission, "enabled": enabled},
json={"permissions": permissions},
headers=user_performing_action.headers,
)
return response

View File

@@ -13,7 +13,7 @@ ENTERPRISE_SKIP = pytest.mark.skipif(
@ENTERPRISE_SKIP
def test_grant_permission_via_toggle(reset: None) -> None: # noqa: ARG001
def test_grant_permission_via_bulk(reset: None) -> None: # noqa: ARG001
admin_user: DATestUser = UserManager.create(name="admin_grant")
basic_user: DATestUser = UserManager.create(name="basic_grant")
@@ -23,10 +23,11 @@ def test_grant_permission_via_toggle(reset: None) -> None: # noqa: ARG001
user_performing_action=admin_user,
)
# Grant manage:llms
resp = UserGroupManager.set_permission(group, "manage:llms", True, admin_user)
# Set desired permissions to [manage:llms]
resp = UserGroupManager.set_permissions(group, ["manage:llms"], admin_user)
resp.raise_for_status()
assert resp.json() == {"permission": "manage:llms", "enabled": True}
result = resp.json()
assert "manage:llms" in result, f"Expected manage:llms in {result}"
# Verify group permissions
group_perms = UserGroupManager.get_permissions(group, admin_user)
@@ -38,7 +39,7 @@ def test_grant_permission_via_toggle(reset: None) -> None: # noqa: ARG001
@ENTERPRISE_SKIP
def test_revoke_permission_via_toggle(reset: None) -> None: # noqa: ARG001
def test_revoke_permission_via_bulk(reset: None) -> None: # noqa: ARG001
admin_user: DATestUser = UserManager.create(name="admin_revoke")
basic_user: DATestUser = UserManager.create(name="basic_revoke")
@@ -48,13 +49,11 @@ def test_revoke_permission_via_toggle(reset: None) -> None: # noqa: ARG001
user_performing_action=admin_user,
)
# Grant then revoke
UserGroupManager.set_permission(
group, "manage:llms", True, admin_user
).raise_for_status()
UserGroupManager.set_permission(
group, "manage:llms", False, admin_user
# Grant then revoke by sending empty list
UserGroupManager.set_permissions(
group, ["manage:llms"], admin_user
).raise_for_status()
UserGroupManager.set_permissions(group, [], admin_user).raise_for_status()
# Verify removed from group
group_perms = UserGroupManager.get_permissions(group, admin_user)
@@ -68,7 +67,7 @@ def test_revoke_permission_via_toggle(reset: None) -> None: # noqa: ARG001
@ENTERPRISE_SKIP
def test_idempotent_grant(reset: None) -> None: # noqa: ARG001
def test_idempotent_bulk_set(reset: None) -> None: # noqa: ARG001
admin_user: DATestUser = UserManager.create(name="admin_idempotent_grant")
group = UserGroupManager.create(
@@ -77,12 +76,12 @@ def test_idempotent_grant(reset: None) -> None: # noqa: ARG001
user_performing_action=admin_user,
)
# Toggle ON twice
UserGroupManager.set_permission(
group, "manage:llms", True, admin_user
# Set same permissions twice
UserGroupManager.set_permissions(
group, ["manage:llms"], admin_user
).raise_for_status()
UserGroupManager.set_permission(
group, "manage:llms", True, admin_user
UserGroupManager.set_permissions(
group, ["manage:llms"], admin_user
).raise_for_status()
group_perms = UserGroupManager.get_permissions(group, admin_user)
@@ -92,22 +91,22 @@ def test_idempotent_grant(reset: None) -> None: # noqa: ARG001
@ENTERPRISE_SKIP
def test_idempotent_revoke(reset: None) -> None: # noqa: ARG001
admin_user: DATestUser = UserManager.create(name="admin_idempotent_revoke")
def test_empty_permissions_is_valid(reset: None) -> None: # noqa: ARG001
admin_user: DATestUser = UserManager.create(name="admin_empty")
group = UserGroupManager.create(
name="idempotent-revoke-group",
name="empty-perms-group",
user_ids=[admin_user.id],
user_performing_action=admin_user,
)
# Toggle OFF when never granted — should not error
resp = UserGroupManager.set_permission(group, "manage:llms", False, admin_user)
# Setting empty list should not error
resp = UserGroupManager.set_permissions(group, [], admin_user)
resp.raise_for_status()
@ENTERPRISE_SKIP
def test_cannot_toggle_basic_access(reset: None) -> None: # noqa: ARG001
def test_cannot_set_basic_access(reset: None) -> None: # noqa: ARG001
admin_user: DATestUser = UserManager.create(name="admin_basic_block")
group = UserGroupManager.create(
@@ -116,12 +115,12 @@ def test_cannot_toggle_basic_access(reset: None) -> None: # noqa: ARG001
user_performing_action=admin_user,
)
resp = UserGroupManager.set_permission(group, "basic", True, admin_user)
resp = UserGroupManager.set_permissions(group, ["basic"], admin_user)
assert resp.status_code == 400, f"Expected 400, got {resp.status_code}"
@ENTERPRISE_SKIP
def test_cannot_toggle_admin(reset: None) -> None: # noqa: ARG001
def test_cannot_set_admin(reset: None) -> None: # noqa: ARG001
admin_user: DATestUser = UserManager.create(name="admin_admin_block")
group = UserGroupManager.create(
@@ -130,7 +129,7 @@ def test_cannot_toggle_admin(reset: None) -> None: # noqa: ARG001
user_performing_action=admin_user,
)
resp = UserGroupManager.set_permission(group, "admin", True, admin_user)
resp = UserGroupManager.set_permissions(group, ["admin"], admin_user)
assert resp.status_code == 400, f"Expected 400, got {resp.status_code}"
@@ -146,11 +145,44 @@ def test_implied_permissions_expand(reset: None) -> None: # noqa: ARG001
)
# Grant manage:agents — should imply add:agents and read:agents
UserGroupManager.set_permission(
group, "manage:agents", True, admin_user
UserGroupManager.set_permissions(
group, ["manage:agents"], admin_user
).raise_for_status()
user_perms = UserManager.get_permissions(basic_user)
assert "manage:agents" in user_perms, f"Missing manage:agents: {user_perms}"
assert "add:agents" in user_perms, f"Missing implied add:agents: {user_perms}"
assert "read:agents" in user_perms, f"Missing implied read:agents: {user_perms}"
@ENTERPRISE_SKIP
def test_bulk_replaces_previous_state(reset: None) -> None:  # noqa: ARG001
    """Setting a new permission list should disable ones no longer included."""
    admin: DATestUser = UserManager.create(name="admin_replace")
    group = UserGroupManager.create(
        name="replace-state-group",
        user_ids=[admin.id],
        user_performing_action=admin,
    )
    # Seed the group with an initial permission set.
    first_pass = UserGroupManager.set_permissions(
        group, ["manage:llms", "manage:actions"], admin
    )
    first_pass.raise_for_status()
    # Overwrite it with a partially-overlapping set; anything not re-listed
    # should be dropped.
    second_pass = UserGroupManager.set_permissions(
        group, ["manage:actions", "manage:user_groups"], admin
    )
    second_pass.raise_for_status()
    group_perms = UserGroupManager.get_permissions(group, admin)
    assert (
        "manage:llms" not in group_perms
    ), f"manage:llms should be removed: {group_perms}"
    assert (
        "manage:actions" in group_perms
    ), f"manage:actions should remain: {group_perms}"
    assert (
        "manage:user_groups" in group_perms
    ), f"manage:user_groups should be added: {group_perms}"

View File

@@ -0,0 +1,27 @@
import type { IconProps } from "@opal/types";
/**
 * "Create agent" icon: a 16x16 stroked SVG glyph (agent octagon motif with a
 * centered plus). Stroke color follows the surrounding text via currentColor.
 */
function SvgCreateAgent({ size, ...props }: IconProps) {
  return (
    <svg
      xmlns="http://www.w3.org/2000/svg"
      viewBox="0 0 16 16"
      width={size}
      height={size}
      fill="none"
      stroke="currentColor"
      {...props}
    >
      <path
        d="M4.5 2.5L8 1L11.5 2.5M13.5 4.5L15 8L13.5 11.5M11.5 13.5L8 15L4.5 13.5M2.5 11.5L1 7.99999L2.5 4.5"
        strokeWidth={1.5}
        strokeLinecap="round"
        strokeLinejoin="round"
      />
      <path
        d="M5 8L8 8.00001M8 8.00001L11 8.00001M8 8.00001L8 5M8 8.00001L8 11"
        strokeWidth={1.5}
        strokeLinecap="round"
        strokeLinejoin="round"
      />
    </svg>
  );
}
export default SvgCreateAgent;

View File

@@ -55,6 +55,7 @@ export { default as SvgColumn } from "@opal/icons/column";
export { default as SvgCopy } from "@opal/icons/copy";
export { default as SvgCornerRightUpDot } from "@opal/icons/corner-right-up-dot";
export { default as SvgCpu } from "@opal/icons/cpu";
export { default as SvgCreateAgent } from "@opal/icons/create-agent";
export { default as SvgCurate } from "@opal/icons/curate";
export { default as SvgCreditCard } from "@opal/icons/credit-card";
export { default as SvgDashboard } from "@opal/icons/dashboard";
@@ -110,6 +111,7 @@ export { default as SvgLmStudio } from "@opal/icons/lm-studio";
export { default as SvgLoader } from "@opal/icons/loader";
export { default as SvgLock } from "@opal/icons/lock";
export { default as SvgLogOut } from "@opal/icons/log-out";
export { default as SvgManageAgent } from "@opal/icons/manage-agent";
export { default as SvgMaximize2 } from "@opal/icons/maximize-2";
export { default as SvgMcp } from "@opal/icons/mcp";
export { default as SvgMenu } from "@opal/icons/menu";

View File

@@ -0,0 +1,27 @@
import type { IconProps } from "@opal/types";
/**
 * "Manage agent" icon: a 16x16 stroked SVG glyph (agent octagon motif with
 * slider-style marks). Stroke color follows the surrounding text via
 * currentColor.
 */
function SvgManageAgent({ size, ...props }: IconProps) {
  return (
    <svg
      xmlns="http://www.w3.org/2000/svg"
      viewBox="0 0 16 16"
      width={size}
      height={size}
      fill="none"
      stroke="currentColor"
      {...props}
    >
      <path
        d="M4.5 2.5L8 1L11.5 2.5M13.5 4.5L15 8L13.5 11.5M11.5 13.5L8 15L4.5 13.5M2.5 11.5L1 7.99999L2.5 4.5"
        strokeWidth={1.5}
        strokeLinecap="round"
        strokeLinejoin="round"
      />
      <path
        d="M6 11V8.75M6 6.75V5M6 6.75H4.75M6 6.75H7.25M10 11V9.25M10 9.25H8.75M10 9.25H11.25M10 7.25V5"
        strokeWidth={1.5}
        strokeLinecap="round"
        strokeLinejoin="round"
      />
    </svg>
  );
}
export default SvgManageAgent;

View File

@@ -0,0 +1,163 @@
"use client";
import { ArrayHelpers, FieldArray, FormikProps, useField } from "formik";
import { ModelConfiguration } from "@/interfaces/llm";
import { ManualErrorMessage, TextFormField } from "@/components/Field";
import { useEffect, useState } from "react";
import CreateButton from "@/refresh-components/buttons/CreateButton";
import { Button } from "@opal/components";
import { SvgX } from "@opal/icons";
import Text from "@/refresh-components/texts/Text";
/**
 * One editable row of the model-configuration table: a model-name input, a
 * max-input-tokens input, and a remove button.
 *
 * Reports this row's Formik validation error (for the `name` sub-field only)
 * up to the parent via `setError`, so the parent can display one aggregate
 * error message below the table.
 */
function ModelConfigurationRow({
  name,
  index,
  arrayHelpers,
  formikProps,
  setError,
}: {
  // Name of the Formik array field (e.g. "model_configurations").
  name: string;
  // This row's index within the array field.
  index: number;
  arrayHelpers: ArrayHelpers;
  formikProps: FormikProps<{ model_configurations: ModelConfiguration[] }>;
  // Called with this row's current error message; null clears it.
  setError: (value: string | null) => void;
}) {
  // Second tuple element from useField is the field metadata (touched/error).
  const [, input] = useField(`${name}[${index}]`);
  useEffect(() => {
    // Only surface an error once the user has interacted with the field.
    if (!input.touched) return;
    // The array-item error is an object keyed by sub-field; only the `name`
    // sub-field's message is reported upward here.
    setError((input.error as { name: string } | undefined)?.name ?? null);
  }, [input.touched, input.error]);
  return (
    <div key={index} className="flex flex-row w-full gap-4">
      <div
        className={`flex flex-[2] ${
          input.touched && input.error ? "border-2 border-error rounded-lg" : ""
        }`}
      >
        <TextFormField
          name={`${name}[${index}].name`}
          label=""
          placeholder={`model-name-${index + 1}`}
          removeLabel
          hideError
        />
      </div>
      <div className="flex flex-[1]">
        <TextFormField
          name={`${name}[${index}].max_input_tokens`}
          label=""
          placeholder="Default"
          removeLabel
          hideError
          type="number"
          min={1}
        />
      </div>
      <div className="flex flex-col justify-center">
        <Button
          disabled={formikProps.values.model_configurations.length <= 1}
          onClick={() => {
            // The last remaining row cannot be removed; clear this row's
            // reported error before deleting it.
            if (formikProps.values.model_configurations.length > 1) {
              setError(null);
              arrayHelpers.remove(index);
            }
          }}
          icon={SvgX}
          prominence="secondary"
        />
      </div>
    </div>
  );
}
/**
 * Formik field array for editing a provider's model configurations.
 *
 * Renders a header, one ModelConfigurationRow per entry, a single aggregate
 * error message, and an "Add New" button. Per-row errors are collected into
 * `errorMap` (row index -> message) and the first non-empty one is shown
 * below the list as `finalError`.
 */
export function ModelConfigurationField({
  name,
  formikProps,
}: {
  // Name of the Formik array field holding ModelConfiguration entries.
  name: string;
  formikProps: FormikProps<{ model_configurations: ModelConfiguration[] }>;
}) {
  // Maps row index -> current validation error message for that row.
  const [errorMap, setErrorMap] = useState<{ [index: number]: string }>({});
  // The single error surfaced to the user (first non-empty map value).
  const [finalError, setFinalError] = useState<string | undefined>();
  return (
    <div className="pb-5 flex flex-col w-full">
      <div className="flex flex-col">
        <Text as="p" mainUiAction>
          Model Configurations
        </Text>
        <Text as="p" secondaryBody text03>
          Add models and customize the number of input tokens that they accept.
        </Text>
      </div>
      <FieldArray
        name={name}
        render={(arrayHelpers: ArrayHelpers) => (
          <div className="flex flex-col">
            <div className="flex flex-col gap-4 py-4">
              <div className="flex">
                <Text as="p" secondaryBody className="flex flex-[2]">
                  Model Name
                </Text>
                <Text as="p" secondaryBody className="flex flex-[1]">
                  Max Input Tokens
                </Text>
                <div className="w-10" />
              </div>
              {formikProps.values.model_configurations.map((_, index) => (
                <ModelConfigurationRow
                  key={index}
                  name={name}
                  formikProps={formikProps}
                  arrayHelpers={arrayHelpers}
                  index={index}
                  setError={(message: string | null) => {
                    const newErrors = { ...errorMap };
                    if (message) {
                      newErrors[index] = message;
                    } else {
                      // A null message is also sent when a row is removed, so
                      // drop this row's entry and shift every higher-indexed
                      // error down by one to keep the map aligned with the
                      // (now shorter) array.
                      delete newErrors[index];
                      // NOTE(review): integer keys iterate in ascending order,
                      // so shifting numKey -> numKey-1 while iterating does
                      // not revisit moved entries.
                      for (const key in newErrors) {
                        const numKey = Number(key);
                        if (numKey > index) {
                          const errorValue = newErrors[key];
                          if (errorValue !== undefined) {
                            // Ensure the value is not undefined
                            newErrors[numKey - 1] = errorValue;
                            delete newErrors[numKey];
                          }
                        }
                      }
                    }
                    setErrorMap(newErrors);
                    // Surface the first remaining non-empty error (if any).
                    setFinalError(
                      Object.values(newErrors).filter((item) => item)[0]
                    );
                  }}
                />
              ))}
            </div>
            {finalError && (
              <ManualErrorMessage>{finalError}</ManualErrorMessage>
            )}
            <div className="mt-3">
              <CreateButton
                onClick={() => {
                  arrayHelpers.push({
                    name: "",
                    is_visible: true,
                    // Use null so Yup.number().nullable() accepts empty inputs
                    max_input_tokens: null,
                  });
                }}
              >
                Add New
              </CreateButton>
            </div>
          </div>
        )}
      />
    </div>
  );
}

View File

@@ -1,18 +0,0 @@
import { defaultTailwindCSS } from "@/components/icons/icons";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { IconProps } from "@opal/types";
export interface ModelIconProps extends IconProps {
provider: string;
modelName?: string;
}
/**
 * Resolves and renders the icon for a provider/model pair via getModelIcon.
 * Defaults: 16px size and the shared admin Tailwind icon classes.
 */
const ModelIcon = ({
  provider,
  modelName,
  size = 16,
  className = defaultTailwindCSS,
}: ModelIconProps) => {
  const ResolvedIcon = getModelIcon(provider, modelName);
  return <ResolvedIcon size={size} className={className} />;
};
export default ModelIcon;

View File

@@ -0,0 +1,17 @@
import { defaultTailwindCSS, IconProps } from "@/components/icons/icons";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
export interface ProviderIconProps extends IconProps {
provider: string;
modelName?: string;
}
/**
 * Resolves and renders the icon for a provider/model pair via
 * getProviderIcon. Defaults: 16px size and the shared admin Tailwind icon
 * classes.
 */
export function ProviderIcon({
  provider,
  modelName,
  size = 16,
  className = defaultTailwindCSS,
}: ProviderIconProps) {
  const ResolvedIcon = getProviderIcon(provider, modelName);
  return <ResolvedIcon size={size} className={className} />;
}

View File

@@ -1 +1 @@
export { default } from "@/refresh-pages/admin/LLMProviderConfigurationPage";
export { default } from "@/refresh-pages/admin/LLMConfigurationPage";

View File

@@ -0,0 +1,622 @@
import { JSX } from "react";
import {
AnthropicIcon,
AmazonIcon,
AzureIcon,
CPUIcon,
MicrosoftIconSVG,
MistralIcon,
MetaIcon,
GeminiIcon,
IconProps,
DeepseekIcon,
OpenAISVG,
QwenIcon,
OllamaIcon,
LMStudioIcon,
LiteLLMIcon,
ZAIIcon,
} from "@/components/icons/icons";
import {
OllamaModelResponse,
OpenRouterModelResponse,
BedrockModelResponse,
LMStudioModelResponse,
LiteLLMProxyModelResponse,
BifrostModelResponse,
ModelConfiguration,
LLMProviderName,
BedrockFetchParams,
OllamaFetchParams,
LMStudioFetchParams,
OpenRouterFetchParams,
LiteLLMProxyFetchParams,
BifrostFetchParams,
OpenAICompatibleFetchParams,
OpenAICompatibleModelResponse,
} from "@/interfaces/llm";
import { SvgAws, SvgBifrost, SvgOpenrouter, SvgPlug } from "@opal/icons";
// Aggregator providers that host models from multiple vendors
// For these providers, getProviderIcon tries to match the *model* name to a
// vendor icon before falling back to the provider's own icon.
export const AGGREGATOR_PROVIDERS = new Set([
  "bedrock",
  "bedrock_converse",
  "openrouter",
  "ollama_chat",
  "lm_studio",
  "litellm_proxy",
  "bifrost",
  "openai_compatible",
  "vertex_ai",
]);
/**
 * Picks the icon component for a provider (and optionally a specific model).
 *
 * Resolution order:
 *  1. For aggregator providers, first match a vendor key as a substring of
 *     the model name (e.g. a Bedrock "claude-*" model gets the Anthropic
 *     icon).
 *  2. Exact match on the provider name.
 *  3. Substring match of the model name for non-aggregator providers.
 *  4. Generic CPU icon fallback.
 *
 * NOTE: substring matching is first-match-wins in iconMap insertion order,
 * so key ordering below is significant.
 */
export const getProviderIcon = (
  providerName: string,
  modelName?: string
): (({ size, className }: IconProps) => JSX.Element) => {
  const iconMap: Record<
    string,
    ({ size, className }: IconProps) => JSX.Element
  > = {
    amazon: AmazonIcon,
    phi: MicrosoftIconSVG,
    mistral: MistralIcon,
    ministral: MistralIcon,
    llama: MetaIcon,
    ollama_chat: OllamaIcon,
    ollama: OllamaIcon,
    lm_studio: LMStudioIcon,
    gemini: GeminiIcon,
    deepseek: DeepseekIcon,
    claude: AnthropicIcon,
    anthropic: AnthropicIcon,
    openai: OpenAISVG,
    // Azure OpenAI should display the Azure logo
    azure: AzureIcon,
    microsoft: MicrosoftIconSVG,
    meta: MetaIcon,
    google: GeminiIcon,
    qwen: QwenIcon,
    qwq: QwenIcon,
    zai: ZAIIcon,
    // Cloud providers - use AWS icon for Bedrock
    bedrock: SvgAws,
    bedrock_converse: SvgAws,
    openrouter: SvgOpenrouter,
    litellm_proxy: LiteLLMIcon,
    bifrost: SvgBifrost,
    openai_compatible: SvgPlug,
    vertex_ai: GeminiIcon,
  };
  const lowerProviderName = providerName.toLowerCase();
  // For aggregator providers (bedrock, openrouter, vertex_ai), prioritize showing
  // the vendor icon based on model name (e.g., show Claude icon for Bedrock Claude models)
  if (AGGREGATOR_PROVIDERS.has(lowerProviderName) && modelName) {
    const lowerModelName = modelName.toLowerCase();
    for (const [key, icon] of Object.entries(iconMap)) {
      if (lowerModelName.includes(key)) {
        return icon;
      }
    }
  }
  // Check if provider name directly matches an icon
  if (lowerProviderName in iconMap) {
    const icon = iconMap[lowerProviderName];
    if (icon) {
      return icon;
    }
  }
  // For non-aggregator providers, check if model name contains any of the keys
  if (modelName) {
    const lowerModelName = modelName.toLowerCase();
    for (const [key, icon] of Object.entries(iconMap)) {
      if (lowerModelName.includes(key)) {
        return icon;
      }
    }
  }
  // Fallback to CPU icon if no matches
  return CPUIcon;
};
/**
 * True when the provider is Anthropic, or the model name contains "claude"
 * (case-insensitive).
 */
export const isAnthropic = (provider: string, modelName?: string): boolean => {
  if (provider === LLMProviderName.ANTHROPIC) {
    return true;
  }
  return Boolean(modelName?.toLowerCase().includes("claude"));
};
/**
 * Fetches Bedrock models directly without any form state dependencies.
 * Uses snake_case params to match API structure.
 *
 * @param params - AWS region (required) plus optional credentials / bearer
 *   token and the provider's configured name.
 * @returns `{ models }` on success or `{ models: [], error }` on failure;
 *   never throws.
 */
export const fetchBedrockModels = async (
  params: BedrockFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  if (!params.aws_region_name) {
    return { models: [], error: "AWS region is required" };
  }
  try {
    const response = await fetch("/api/admin/llm/bedrock/available-models", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        aws_region_name: params.aws_region_name,
        aws_access_key_id: params.aws_access_key_id,
        aws_secret_access_key: params.aws_secret_access_key,
        aws_bearer_token_bedrock: params.aws_bearer_token_bedrock,
        provider_name: params.provider_name,
      }),
    });
    if (!response.ok) {
      // Prefer the backend's error detail; fall back to a generic message.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch {
        // ignore JSON parsing errors
      }
      return { models: [], error: errorMessage };
    }
    const data: BedrockModelResponse[] = await response.json();
    // NOTE(review): unlike the other fetchers, Bedrock models start hidden
    // (is_visible: false) — presumably because the catalog is large; confirm.
    const models: ModelConfiguration[] = data.map((modelData) => ({
      name: modelData.name,
      display_name: modelData.display_name,
      is_visible: false,
      max_input_tokens: modelData.max_input_tokens,
      supports_image_input: modelData.supports_image_input,
      supports_reasoning: false,
    }));
    return { models };
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : "Unknown error";
    return { models: [], error: errorMessage };
  }
};
/**
 * Fetches Ollama models directly without any form state dependencies.
 * Uses snake_case params to match API structure. Never throws; failures are
 * reported through the `error` field.
 */
export const fetchOllamaModels = async (
  params: OllamaFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  if (!params.api_base) {
    return { models: [], error: "API Base is required" };
  }
  // Build the request up front; abort is wired through params.signal.
  const requestInit = {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      api_base: params.api_base,
      provider_name: params.provider_name,
    }),
    signal: params.signal,
  };
  try {
    const response = await fetch(
      "/api/admin/llm/ollama/available-models",
      requestInit
    );
    if (!response.ok) {
      // Prefer the backend's error detail; fall back to a generic message.
      let message = "Failed to fetch models";
      try {
        const body = await response.json();
        message = body.detail || body.message || message;
      } catch {
        // ignore JSON parsing errors
      }
      return { models: [], error: message };
    }
    const payload: OllamaModelResponse[] = await response.json();
    return {
      models: payload.map((entry) => ({
        name: entry.name,
        display_name: entry.display_name,
        is_visible: true,
        max_input_tokens: entry.max_input_tokens,
        supports_image_input: entry.supports_image_input,
        supports_reasoning: false,
      })),
    };
  } catch (err) {
    return {
      models: [],
      error: err instanceof Error ? err.message : "Unknown error",
    };
  }
};
/**
 * Fetches OpenRouter models directly without any form state dependencies.
 * Uses snake_case params to match API structure.
 *
 * @param params - API base and API key (both required) plus the provider's
 *   configured name.
 * @returns `{ models }` on success or `{ models: [], error }` on failure;
 *   never throws.
 */
export const fetchOpenRouterModels = async (
  params: OpenRouterFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  const apiBase = params.api_base;
  const apiKey = params.api_key;
  if (!apiBase) {
    return { models: [], error: "API Base is required" };
  }
  if (!apiKey) {
    return { models: [], error: "API Key is required" };
  }
  try {
    const response = await fetch("/api/admin/llm/openrouter/available-models", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        api_base: apiBase,
        api_key: apiKey,
        provider_name: params.provider_name,
      }),
    });
    if (!response.ok) {
      // Prefer the backend's error detail; fall back to a generic message.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch (jsonError) {
        console.warn(
          "Failed to parse OpenRouter model fetch error response",
          jsonError
        );
      }
      return { models: [], error: errorMessage };
    }
    const data: OpenRouterModelResponse[] = await response.json();
    const models: ModelConfiguration[] = data.map((modelData) => ({
      name: modelData.name,
      display_name: modelData.display_name,
      is_visible: true,
      max_input_tokens: modelData.max_input_tokens,
      supports_image_input: modelData.supports_image_input,
      supports_reasoning: false,
    }));
    return { models };
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : "Unknown error";
    return { models: [], error: errorMessage };
  }
};
/**
 * Fetches LM Studio models directly without any form state dependencies.
 * Uses snake_case params to match API structure.
 *
 * @param params - API base (required), optional API key plus an
 *   `api_key_changed` flag forwarded to the backend, the provider's
 *   configured name, and an optional AbortSignal.
 * @returns `{ models }` on success or `{ models: [], error }` on failure;
 *   never throws.
 */
export const fetchLMStudioModels = async (
  params: LMStudioFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  const apiBase = params.api_base;
  if (!apiBase) {
    return { models: [], error: "API Base is required" };
  }
  try {
    const response = await fetch("/api/admin/llm/lm-studio/available-models", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        api_base: apiBase,
        api_key: params.api_key,
        api_key_changed: params.api_key_changed ?? false,
        provider_name: params.provider_name,
      }),
      signal: params.signal,
    });
    if (!response.ok) {
      // Prefer the backend's error detail; fall back to a generic message.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch (jsonError) {
        console.warn(
          "Failed to parse LM Studio model fetch error response",
          jsonError
        );
      }
      return { models: [], error: errorMessage };
    }
    const data: LMStudioModelResponse[] = await response.json();
    // LM Studio reports reasoning support per model, so pass it through.
    const models: ModelConfiguration[] = data.map((modelData) => ({
      name: modelData.name,
      display_name: modelData.display_name,
      is_visible: true,
      max_input_tokens: modelData.max_input_tokens,
      supports_image_input: modelData.supports_image_input,
      supports_reasoning: modelData.supports_reasoning,
    }));
    return { models };
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : "Unknown error";
    return { models: [], error: errorMessage };
  }
};
/**
 * Fetches Bifrost models directly without any form state dependencies.
 * Uses snake_case params to match API structure.
 *
 * @param params - API base (required), optional API key, the provider's
 *   configured name, and an optional AbortSignal.
 * @returns `{ models }` on success or `{ models: [], error }` on failure;
 *   never throws.
 */
export const fetchBifrostModels = async (
  params: BifrostFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  const apiBase = params.api_base;
  if (!apiBase) {
    return { models: [], error: "API Base is required" };
  }
  try {
    const response = await fetch("/api/admin/llm/bifrost/available-models", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        api_base: apiBase,
        api_key: params.api_key,
        provider_name: params.provider_name,
      }),
      signal: params.signal,
    });
    if (!response.ok) {
      // Prefer the backend's error detail; fall back to a generic message.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch (jsonError) {
        console.warn(
          "Failed to parse Bifrost model fetch error response",
          jsonError
        );
      }
      return { models: [], error: errorMessage };
    }
    const data: BifrostModelResponse[] = await response.json();
    // Bifrost reports reasoning support per model, so pass it through.
    const models: ModelConfiguration[] = data.map((modelData) => ({
      name: modelData.name,
      display_name: modelData.display_name,
      is_visible: true,
      max_input_tokens: modelData.max_input_tokens,
      supports_image_input: modelData.supports_image_input,
      supports_reasoning: modelData.supports_reasoning,
    }));
    return { models };
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : "Unknown error";
    return { models: [], error: errorMessage };
  }
};
/**
 * Fetches models from a generic OpenAI-compatible server.
 * Uses snake_case params to match API structure.
 *
 * @param params - API base (required), optional API key, the provider's
 *   configured name, and an optional AbortSignal.
 * @returns `{ models }` on success or `{ models: [], error }` on failure;
 *   never throws.
 */
export const fetchOpenAICompatibleModels = async (
  params: OpenAICompatibleFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  const apiBase = params.api_base;
  if (!apiBase) {
    return { models: [], error: "API Base is required" };
  }
  try {
    const response = await fetch(
      "/api/admin/llm/openai-compatible/available-models",
      {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          api_base: apiBase,
          api_key: params.api_key,
          provider_name: params.provider_name,
        }),
        signal: params.signal,
      }
    );
    if (!response.ok) {
      // Prefer the backend's error detail; fall back to a generic message.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch {
        // ignore JSON parsing errors
      }
      return { models: [], error: errorMessage };
    }
    const data: OpenAICompatibleModelResponse[] = await response.json();
    const models: ModelConfiguration[] = data.map((modelData) => ({
      name: modelData.name,
      display_name: modelData.display_name,
      is_visible: true,
      max_input_tokens: modelData.max_input_tokens,
      supports_image_input: modelData.supports_image_input,
      supports_reasoning: modelData.supports_reasoning,
    }));
    return { models };
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : "Unknown error";
    return { models: [], error: errorMessage };
  }
};
/**
 * Fetches LiteLLM Proxy models directly without any form state dependencies.
 * Uses snake_case params to match API structure.
 *
 * @param params - API base and API key (both required), the provider's
 *   configured name, and an optional AbortSignal.
 * @returns `{ models }` on success or `{ models: [], error }` on failure;
 *   never throws.
 */
export const fetchLiteLLMProxyModels = async (
  params: LiteLLMProxyFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  const apiBase = params.api_base;
  const apiKey = params.api_key;
  if (!apiBase) {
    return { models: [], error: "API Base is required" };
  }
  if (!apiKey) {
    return { models: [], error: "API Key is required" };
  }
  try {
    const response = await fetch("/api/admin/llm/litellm/available-models", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        api_base: apiBase,
        api_key: apiKey,
        provider_name: params.provider_name,
      }),
      signal: params.signal,
    });
    if (!response.ok) {
      // Prefer the backend's error detail; fall back to a generic message.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch {
        // ignore JSON parsing errors
      }
      return { models: [], error: errorMessage };
    }
    const data: LiteLLMProxyModelResponse[] = await response.json();
    // The LiteLLM response only carries model_name, so it is used for both
    // name and display_name; capabilities default to conservative values.
    const models: ModelConfiguration[] = data.map((modelData) => ({
      name: modelData.model_name,
      display_name: modelData.model_name,
      is_visible: true,
      max_input_tokens: null,
      supports_image_input: false,
      supports_reasoning: false,
    }));
    return { models };
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : "Unknown error";
    return { models: [], error: errorMessage };
  }
};
/**
 * Fetches models for a provider. Accepts form values directly and maps them
 * to the expected fetch params format internally.
 *
 * @param providerName - Provider endpoint name (an LLMProviderName value).
 * @param formValues - Raw form state; only the fields relevant to the
 *   selected provider are read.
 * @param signal - Optional AbortSignal forwarded to fetchers that accept one.
 * @returns The provider-specific fetcher's `{ models, error? }` result, or
 *   an error result for unknown providers.
 */
export const fetchModels = async (
  providerName: string,
  formValues: {
    api_base?: string;
    api_key?: string;
    api_key_changed?: boolean;
    name?: string;
    custom_config?: Record<string, string>;
    model_configurations?: ModelConfiguration[];
  },
  signal?: AbortSignal
) => {
  // Default once so every branch reads custom config the same way.
  const customConfig = formValues.custom_config || {};
  switch (providerName) {
    case LLMProviderName.BEDROCK:
      return fetchBedrockModels({
        aws_region_name: customConfig.AWS_REGION_NAME || "",
        aws_access_key_id: customConfig.AWS_ACCESS_KEY_ID,
        aws_secret_access_key: customConfig.AWS_SECRET_ACCESS_KEY,
        aws_bearer_token_bedrock: customConfig.AWS_BEARER_TOKEN_BEDROCK,
        provider_name: formValues.name,
      });
    case LLMProviderName.OLLAMA_CHAT:
      return fetchOllamaModels({
        api_base: formValues.api_base,
        provider_name: formValues.name,
        signal,
      });
    case LLMProviderName.LM_STUDIO:
      return fetchLMStudioModels({
        api_base: formValues.api_base,
        // Read from the already-defaulted customConfig (was
        // formValues.custom_config?.LM_STUDIO_API_KEY) for consistency with
        // the Bedrock branch; behavior is identical.
        api_key: customConfig.LM_STUDIO_API_KEY,
        api_key_changed: formValues.api_key_changed ?? false,
        provider_name: formValues.name,
        signal,
      });
    case LLMProviderName.OPENROUTER:
      return fetchOpenRouterModels({
        api_base: formValues.api_base,
        api_key: formValues.api_key,
        provider_name: formValues.name,
      });
    case LLMProviderName.LITELLM_PROXY:
      return fetchLiteLLMProxyModels({
        api_base: formValues.api_base,
        api_key: formValues.api_key,
        provider_name: formValues.name,
        signal,
      });
    case LLMProviderName.BIFROST:
      return fetchBifrostModels({
        api_base: formValues.api_base,
        api_key: formValues.api_key,
        provider_name: formValues.name,
        signal,
      });
    case LLMProviderName.OPENAI_COMPATIBLE:
      return fetchOpenAICompatibleModels({
        api_base: formValues.api_base,
        api_key: formValues.api_key,
        provider_name: formValues.name,
        signal,
      });
    default:
      return { models: [], error: `Unknown provider: ${providerName}` };
  }
};
/**
 * Whether the given provider supports dynamic model discovery through
 * fetchModels (i.e. has a provider-specific available-models fetcher).
 */
export function canProviderFetchModels(providerName?: string) {
  if (!providerName) return false;
  const fetchableProviders: string[] = [
    LLMProviderName.BEDROCK,
    LLMProviderName.OLLAMA_CHAT,
    LLMProviderName.LM_STUDIO,
    LLMProviderName.OPENROUTER,
    LLMProviderName.LITELLM_PROXY,
    LLMProviderName.BIFROST,
    LLMProviderName.OPENAI_COMPATIBLE,
  ];
  return fetchableProviders.includes(providerName);
}

View File

@@ -5,7 +5,7 @@ import { Button } from "@opal/components";
import { Text } from "@opal/components";
import { ContentAction } from "@opal/layouts";
import { SvgEyeOff, SvgX } from "@opal/icons";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import AgentMessage, {
AgentMessageProps,
} from "@/app/app/message/messageComponents/AgentMessage";
@@ -71,7 +71,7 @@ export default function MultiModelPanel({
errorStackTrace,
errorDetails,
}: MultiModelPanelProps) {
const ModelIcon = getModelIcon(provider, modelName);
const ProviderIcon = getProviderIcon(provider, modelName);
const handlePanelClick = useCallback(() => {
if (!isHidden && !isPreferred) onSelect();
@@ -88,7 +88,7 @@ export default function MultiModelPanel({
sizePreset="main-ui"
variant="body"
paddingVariant="lg"
icon={ModelIcon}
icon={ProviderIcon}
title={isHidden ? markdown(`~~${displayName}~~`) : displayName}
rightChildren={
<div className="flex items-center gap-1 px-2">

View File

@@ -18,7 +18,7 @@ import {
isRecommendedModel,
} from "@/app/craft/onboarding/constants";
import { ToggleWarningModal } from "./ToggleWarningModal";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { Section } from "@/layouts/general-layouts";
import {
Accordion,
@@ -365,7 +365,9 @@ export function BuildLLMPopover({
const isExpanded = expandedGroups.includes(
group.providerKey
);
const ModelIcon = getModelIcon(group.providerKey);
const ProviderIcon = getProviderIcon(
group.providerKey
);
return (
<AccordionItem
@@ -377,7 +379,7 @@ export function BuildLLMPopover({
<AccordionTrigger className="flex items-center rounded-08 hover:no-underline hover:bg-background-tint-02 group [&>svg]:hidden w-full py-1">
<div className="flex items-center gap-1 shrink-0">
<div className="flex items-center justify-center size-5 shrink-0">
<ModelIcon size={16} />
<ProviderIcon size={16} />
</div>
<Text
secondaryBody

View File

@@ -24,6 +24,7 @@ import {
} from "@/app/craft/onboarding/constants";
import { LLMProviderDescriptor } from "@/interfaces/llm";
import { LLM_PROVIDERS_ADMIN_URL } from "@/lib/llmConfig/constants";
import { buildOnboardingInitialValues as buildInitialValues } from "@/sections/modals/llmConfig/utils";
import { testApiKeyHelper } from "@/sections/modals/llmConfig/svc";
import OnboardingInfoPages from "@/app/craft/onboarding/components/OnboardingInfoPages";
import OnboardingUserInfo from "@/app/craft/onboarding/components/OnboardingUserInfo";
@@ -220,8 +221,10 @@ export default function BuildOnboardingModal({
setConnectionStatus("testing");
setErrorMessage("");
const baseValues = buildInitialValues();
const providerName = `build-mode-${currentProviderConfig.providerName}`;
const payload = {
...baseValues,
name: providerName,
provider: currentProviderConfig.providerName,
api_key: apiKey,

View File

@@ -48,7 +48,7 @@ import NotAllowedModal from "@/app/craft/onboarding/components/NotAllowedModal";
import { useOnboarding } from "@/app/craft/onboarding/BuildOnboardingProvider";
import { useLLMProviders } from "@/hooks/useLLMProviders";
import { useUser } from "@/providers/UserProvider";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import {
getBuildUserPersona,
getPersonaInfo,
@@ -475,10 +475,10 @@ export default function BuildConfigPage() {
>
{pendingLlmSelection?.provider &&
(() => {
const ModelIcon = getModelIcon(
const ProviderIcon = getProviderIcon(
pendingLlmSelection.provider
);
return <ModelIcon className="w-4 h-4" />;
return <ProviderIcon className="w-4 h-4" />;
})()}
<Text mainUiAction>{pendingLlmDisplayName}</Text>
<SvgChevronDown className="w-4 h-4 text-text-03" />

View File

@@ -3,14 +3,14 @@
import { useMemo } from "react";
import { parseLlmDescriptor, structureValue } from "@/lib/llmConfig/utils";
import { DefaultModel, LLMProviderDescriptor } from "@/interfaces/llm";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import { createIcon } from "@/components/icons/icons";
interface LLMOption {
name: string;
value: string;
icon: ReturnType<typeof getModelIcon>;
icon: ReturnType<typeof getProviderIcon>;
modelName: string;
providerName: string;
provider: string;
@@ -85,7 +85,7 @@ export default function LLMSelector({
provider.provider,
modelConfiguration.name
),
icon: getModelIcon(provider.provider, modelConfiguration.name),
icon: getProviderIcon(provider.provider, modelConfiguration.name),
modelName: modelConfiguration.name,
providerName: provider.name,
provider: provider.provider,

View File

@@ -5,7 +5,6 @@ import { errorHandlingFetcher } from "@/lib/fetcher";
import { SWR_KEYS } from "@/lib/swr-keys";
import {
LLMProviderDescriptor,
LLMProviderName,
LLMProviderResponse,
LLMProviderView,
WellKnownLLMProviderDescriptor,
@@ -139,12 +138,12 @@ export function useAdminLLMProviders() {
* Used inside individual provider modals to pre-populate model lists
* before the user has entered credentials.
*
* @param providerName - The provider's API endpoint name (e.g. "openai", "anthropic").
* @param providerEndpoint - The provider's API endpoint name (e.g. "openai", "anthropic").
* Pass `null` to suppress the request.
*/
export function useWellKnownLLMProvider(providerName: LLMProviderName) {
export function useWellKnownLLMProvider(providerEndpoint: string | null) {
const { data, error, isLoading } = useSWR<WellKnownLLMProviderDescriptor>(
providerName ? SWR_KEYS.wellKnownLlmProvider(providerName) : null,
providerEndpoint ? SWR_KEYS.wellKnownLlmProvider(providerEndpoint) : null,
errorHandlingFetcher,
{
revalidateOnFocus: false,

View File

@@ -1,4 +1,7 @@
import type { OnboardingActions } from "@/interfaces/onboarding";
import type {
OnboardingState,
OnboardingActions,
} from "@/interfaces/onboarding";
export enum LLMProviderName {
OPENAI = "openai",
@@ -121,11 +124,14 @@ export interface LLMProviderFormProps {
existingLlmProvider?: LLMProviderView;
shouldMarkAsDefault?: boolean;
onOpenChange?: (open: boolean) => void;
/** Called after successful provider creation/update. */
onSuccess?: () => void | Promise<void>;
/** The current default model name for this provider (from the global default). */
defaultModelName?: string;
// Onboarding-specific (only when variant === "onboarding")
onboardingState?: OnboardingState;
onboardingActions?: OnboardingActions;
llmDescriptor?: WellKnownLLMProviderDescriptor;
}
// Param types for model fetching functions - use snake_case to match API structure

View File

@@ -1,9 +1,8 @@
"use client";
import type { RichStr, WithoutStyles } from "@opal/types";
import type { RichStr } from "@opal/types";
import { resolveStr } from "@opal/components/text/InlineMarkdown";
import Text from "@/refresh-components/texts/Text";
import Separator from "@/refresh-components/Separator";
import { SvgXOctagon, SvgAlertCircle } from "@opal/icons";
import { useField, useFormikContext } from "formik";
import { Section } from "@/layouts/general-layouts";
@@ -230,27 +229,9 @@ function ErrorTextLayout({ children, type = "error" }: ErrorTextLayoutProps) {
);
}
/**
* FieldSeparator - A horizontal rule with inline padding, used to visually separate field groups.
*/
function FieldSeparator() {
return <Separator noPadding className="p-2" />;
}
/**
* FieldPadder - Wraps a field in standard horizontal + vertical padding (`p-2 w-full`).
*/
type FieldPadderProps = WithoutStyles<React.HTMLAttributes<HTMLDivElement>>;
function FieldPadder(props: FieldPadderProps) {
return <div {...props} className="p-2 w-full" />;
}
export {
VerticalInputLayout as Vertical,
HorizontalInputLayout as Horizontal,
ErrorLayout as Error,
ErrorTextLayout,
FieldSeparator,
FieldPadder,
type FieldPadderProps,
};

View File

@@ -36,229 +36,397 @@ import {
SvgZoomIn,
} from "@opal/icons";
export interface FeatureFlags {
vectorDbEnabled: boolean;
kgExposed: boolean;
enableCloud: boolean;
enableEnterprise: boolean;
customAnalyticsEnabled: boolean;
hasSubscription: boolean;
hooksEnabled: boolean;
opensearchEnabled: boolean;
queryHistoryEnabled: boolean;
}
export interface AdminRouteEntry {
path: string;
icon: IconFunctionComponent;
title: string;
sidebarLabel: string;
requiredPermission: string;
section: string;
requiresEnterprise: boolean;
visibleWhen: ((flags: FeatureFlags) => boolean) | null;
}
/**
* Single source of truth for every admin route: path, icon, page-header
* title, and sidebar label.
*/
export const ADMIN_ROUTES = {
INDEXING_STATUS: {
path: "/admin/indexing/status",
icon: SvgBookOpen,
title: "Existing Connectors",
sidebarLabel: "Existing Connectors",
},
ADD_CONNECTOR: {
path: "/admin/add-connector",
icon: SvgUploadCloud,
title: "Add Connector",
sidebarLabel: "Add Connector",
},
DOCUMENT_SETS: {
path: "/admin/documents/sets",
icon: SvgFiles,
title: "Document Sets",
sidebarLabel: "Document Sets",
},
DOCUMENT_EXPLORER: {
path: "/admin/documents/explorer",
icon: SvgZoomIn,
title: "Document Explorer",
sidebarLabel: "Explorer",
},
DOCUMENT_FEEDBACK: {
path: "/admin/documents/feedback",
icon: SvgThumbsUp,
title: "Document Feedback",
sidebarLabel: "Feedback",
},
AGENTS: {
path: "/admin/agents",
icon: SvgOnyxOctagon,
title: "Agents",
sidebarLabel: "Agents",
},
SLACK_BOTS: {
path: "/admin/bots",
icon: SvgSlack,
title: "Slack Integration",
sidebarLabel: "Slack Integration",
},
DISCORD_BOTS: {
path: "/admin/discord-bot",
icon: SvgDiscordMono,
title: "Discord Integration",
sidebarLabel: "Discord Integration",
},
MCP_ACTIONS: {
path: "/admin/actions/mcp",
icon: SvgMcp,
title: "MCP Actions",
sidebarLabel: "MCP Actions",
},
OPENAPI_ACTIONS: {
path: "/admin/actions/open-api",
icon: SvgActions,
title: "OpenAPI Actions",
sidebarLabel: "OpenAPI Actions",
},
STANDARD_ANSWERS: {
path: "/admin/standard-answer",
icon: SvgClipboard,
title: "Standard Answers",
sidebarLabel: "Standard Answers",
},
GROUPS: {
path: "/admin/groups",
icon: SvgUsers,
title: "Manage User Groups",
sidebarLabel: "Groups",
},
CHAT_PREFERENCES: {
path: "/admin/configuration/chat-preferences",
icon: SvgBubbleText,
title: "Chat Preferences",
sidebarLabel: "Chat Preferences",
},
// ── System Configuration (unlabeled section) ──────────────────────
LLM_MODELS: {
path: "/admin/configuration/llm",
icon: SvgCpu,
title: "Language Models",
sidebarLabel: "Language Models",
requiredPermission: "manage:llms",
section: "",
requiresEnterprise: false,
visibleWhen: null,
},
WEB_SEARCH: {
path: "/admin/configuration/web-search",
icon: SvgGlobe,
title: "Web Search",
sidebarLabel: "Web Search",
requiredPermission: "admin",
section: "",
requiresEnterprise: false,
visibleWhen: null,
},
IMAGE_GENERATION: {
path: "/admin/configuration/image-generation",
icon: SvgImage,
title: "Image Generation",
sidebarLabel: "Image Generation",
requiredPermission: "admin",
section: "",
requiresEnterprise: false,
visibleWhen: null,
},
VOICE: {
path: "/admin/configuration/voice",
icon: SvgAudio,
title: "Voice",
sidebarLabel: "Voice",
requiredPermission: "admin",
section: "",
requiresEnterprise: false,
visibleWhen: null,
},
CODE_INTERPRETER: {
path: "/admin/configuration/code-interpreter",
icon: SvgTerminal,
title: "Code Interpreter",
sidebarLabel: "Code Interpreter",
requiredPermission: "admin",
section: "",
requiresEnterprise: false,
visibleWhen: null,
},
INDEX_SETTINGS: {
path: "/admin/configuration/search",
icon: SvgSearchMenu,
title: "Index Settings",
sidebarLabel: "Index Settings",
},
DOCUMENT_PROCESSING: {
path: "/admin/configuration/document-processing",
icon: SvgFileText,
title: "Document Processing",
sidebarLabel: "Document Processing",
CHAT_PREFERENCES: {
path: "/admin/configuration/chat-preferences",
icon: SvgBubbleText,
title: "Chat Preferences",
sidebarLabel: "Chat Preferences",
requiredPermission: "admin",
section: "",
requiresEnterprise: false,
visibleWhen: null,
},
KNOWLEDGE_GRAPH: {
path: "/admin/kg",
icon: SvgNetworkGraph,
title: "Knowledge Graph",
sidebarLabel: "Knowledge Graph",
},
USERS: {
path: "/admin/users",
icon: SvgUser,
title: "Users & Requests",
sidebarLabel: "Users",
},
API_KEYS: {
path: "/admin/service-accounts",
icon: SvgUserKey,
title: "Service Accounts",
sidebarLabel: "Service Accounts",
},
TOKEN_RATE_LIMITS: {
path: "/admin/token-rate-limits",
icon: SvgProgressBars,
title: "Spending Limits",
sidebarLabel: "Spending Limits",
},
USAGE: {
path: "/admin/performance/usage",
icon: SvgActivity,
title: "Usage Statistics",
sidebarLabel: "Usage Statistics",
},
QUERY_HISTORY: {
path: "/admin/performance/query-history",
icon: SvgHistory,
title: "Query History",
sidebarLabel: "Query History",
requiredPermission: "admin",
section: "",
requiresEnterprise: false,
visibleWhen: (f: FeatureFlags) => f.vectorDbEnabled && f.kgExposed,
},
CUSTOM_ANALYTICS: {
path: "/admin/performance/custom-analytics",
icon: SvgBarChart,
title: "Custom Analytics",
sidebarLabel: "Custom Analytics",
requiredPermission: "admin",
section: "",
requiresEnterprise: true,
visibleWhen: (f: FeatureFlags) =>
!f.enableCloud && f.customAnalyticsEnabled,
},
THEME: {
path: "/admin/theme",
icon: SvgPaintBrush,
title: "Appearance & Theming",
sidebarLabel: "Appearance & Theming",
// ── Agents & Actions ──────────────────────────────────────────────
AGENTS: {
path: "/admin/agents",
icon: SvgOnyxOctagon,
title: "Agents",
sidebarLabel: "Agents",
requiredPermission: "admin",
section: "Agents & Actions",
requiresEnterprise: false,
visibleWhen: null,
},
BILLING: {
path: "/admin/billing",
icon: SvgWallet,
title: "Plans & Billing",
sidebarLabel: "Plans & Billing",
MCP_ACTIONS: {
path: "/admin/actions/mcp",
icon: SvgMcp,
title: "MCP Actions",
sidebarLabel: "MCP Actions",
requiredPermission: "admin",
section: "Agents & Actions",
requiresEnterprise: false,
visibleWhen: null,
},
OPENAPI_ACTIONS: {
path: "/admin/actions/open-api",
icon: SvgActions,
title: "OpenAPI Actions",
sidebarLabel: "OpenAPI Actions",
requiredPermission: "admin",
section: "Agents & Actions",
requiresEnterprise: false,
visibleWhen: null,
},
// ── Documents & Knowledge ─────────────────────────────────────────
INDEXING_STATUS: {
path: "/admin/indexing/status",
icon: SvgBookOpen,
title: "Existing Connectors",
sidebarLabel: "Existing Connectors",
requiredPermission: "admin",
section: "Documents & Knowledge",
requiresEnterprise: false,
visibleWhen: (f: FeatureFlags) => f.vectorDbEnabled,
},
ADD_CONNECTOR: {
path: "/admin/add-connector",
icon: SvgUploadCloud,
title: "Add Connector",
sidebarLabel: "Add Connector",
requiredPermission: "admin",
section: "Documents & Knowledge",
requiresEnterprise: false,
visibleWhen: (f: FeatureFlags) => f.vectorDbEnabled,
},
DOCUMENT_SETS: {
path: "/admin/documents/sets",
icon: SvgFiles,
title: "Document Sets",
sidebarLabel: "Document Sets",
requiredPermission: "admin",
section: "Documents & Knowledge",
requiresEnterprise: false,
visibleWhen: (f: FeatureFlags) => f.vectorDbEnabled,
},
DOCUMENT_EXPLORER: {
path: "/admin/documents/explorer",
icon: SvgZoomIn,
title: "Document Explorer",
sidebarLabel: "",
requiredPermission: "admin",
section: "Documents & Knowledge",
requiresEnterprise: false,
visibleWhen: (f: FeatureFlags) => f.vectorDbEnabled,
},
DOCUMENT_FEEDBACK: {
path: "/admin/documents/feedback",
icon: SvgThumbsUp,
title: "Document Feedback",
sidebarLabel: "",
requiredPermission: "admin",
section: "Documents & Knowledge",
requiresEnterprise: false,
visibleWhen: (f: FeatureFlags) => f.vectorDbEnabled,
},
INDEX_SETTINGS: {
path: "/admin/configuration/search",
icon: SvgSearchMenu,
title: "Index Settings",
sidebarLabel: "Index Settings",
requiredPermission: "admin",
section: "Documents & Knowledge",
requiresEnterprise: false,
visibleWhen: (f: FeatureFlags) => f.vectorDbEnabled && !f.enableCloud,
},
DOCUMENT_PROCESSING: {
path: "/admin/configuration/document-processing",
icon: SvgFileText,
title: "Document Processing",
sidebarLabel: "",
requiredPermission: "admin",
section: "Documents & Knowledge",
requiresEnterprise: false,
visibleWhen: (f: FeatureFlags) => f.vectorDbEnabled,
},
INDEX_MIGRATION: {
path: "/admin/document-index-migration",
icon: SvgArrowExchange,
title: "Document Index Migration",
sidebarLabel: "Document Index Migration",
requiredPermission: "admin",
section: "Documents & Knowledge",
requiresEnterprise: false,
visibleWhen: (f: FeatureFlags) => f.vectorDbEnabled && f.opensearchEnabled,
},
// ── Integrations ──────────────────────────────────────────────────
API_KEYS: {
path: "/admin/service-accounts",
icon: SvgUserKey,
title: "Service Accounts",
sidebarLabel: "Service Accounts",
requiredPermission: "admin",
section: "Integrations",
requiresEnterprise: false,
visibleWhen: null,
},
SLACK_BOTS: {
path: "/admin/bots",
icon: SvgSlack,
title: "Slack Integration",
sidebarLabel: "Slack Integration",
requiredPermission: "admin",
section: "Integrations",
requiresEnterprise: false,
visibleWhen: null,
},
DISCORD_BOTS: {
path: "/admin/discord-bot",
icon: SvgDiscordMono,
title: "Discord Integration",
sidebarLabel: "Discord Integration",
requiredPermission: "admin",
section: "Integrations",
requiresEnterprise: false,
visibleWhen: null,
},
HOOKS: {
path: "/admin/hooks",
icon: SvgShareWebhook,
title: "Hook Extensions",
sidebarLabel: "Hook Extensions",
requiredPermission: "admin",
section: "Integrations",
requiresEnterprise: false,
visibleWhen: (f: FeatureFlags) => f.hooksEnabled,
},
// ── Permissions ───────────────────────────────────────────────────
USERS: {
path: "/admin/users",
icon: SvgUser,
title: "Users & Requests",
sidebarLabel: "Users",
requiredPermission: "admin",
section: "Permissions",
requiresEnterprise: false,
visibleWhen: null,
},
GROUPS: {
path: "/admin/groups",
icon: SvgUsers,
title: "Manage User Groups",
sidebarLabel: "Groups",
requiredPermission: "admin",
section: "Permissions",
requiresEnterprise: true,
visibleWhen: null,
},
SCIM: {
path: "/admin/scim",
icon: SvgUserSync,
title: "SCIM",
sidebarLabel: "SCIM",
requiredPermission: "admin",
section: "Permissions",
requiresEnterprise: true,
visibleWhen: null,
},
// ── Organization ──────────────────────────────────────────────────
BILLING: {
path: "/admin/billing",
icon: SvgWallet,
title: "Plans & Billing",
sidebarLabel: "Plans & Billing",
requiredPermission: "admin",
section: "Organization",
requiresEnterprise: false,
visibleWhen: (f: FeatureFlags) => f.hasSubscription,
},
TOKEN_RATE_LIMITS: {
path: "/admin/token-rate-limits",
icon: SvgProgressBars,
title: "Spending Limits",
sidebarLabel: "Spending Limits",
requiredPermission: "admin",
section: "Organization",
requiresEnterprise: true,
visibleWhen: null,
},
THEME: {
path: "/admin/theme",
icon: SvgPaintBrush,
title: "Appearance & Theming",
sidebarLabel: "Appearance & Theming",
requiredPermission: "admin",
section: "Organization",
requiresEnterprise: true,
visibleWhen: null,
},
// ── Usage ─────────────────────────────────────────────────────────
USAGE: {
path: "/admin/performance/usage",
icon: SvgActivity,
title: "Usage Statistics",
sidebarLabel: "Usage Statistics",
requiredPermission: "admin",
section: "Usage",
requiresEnterprise: true,
visibleWhen: null,
},
QUERY_HISTORY: {
path: "/admin/performance/query-history",
icon: SvgHistory,
title: "Query History",
sidebarLabel: "Query History",
requiredPermission: "admin",
section: "Usage",
requiresEnterprise: true,
visibleWhen: (f: FeatureFlags) => f.queryHistoryEnabled,
},
// ── Other (admin-only) ────────────────────────────────────────────
STANDARD_ANSWERS: {
path: "/admin/standard-answer",
icon: SvgClipboard,
title: "Standard Answers",
sidebarLabel: "",
requiredPermission: "admin",
section: "",
requiresEnterprise: false,
visibleWhen: null,
},
DEBUG: {
path: "/admin/debug",
icon: SvgDownload,
title: "Debug Logs",
sidebarLabel: "Debug Logs",
sidebarLabel: "",
requiredPermission: "admin",
section: "",
requiresEnterprise: false,
visibleWhen: null,
},
// Prefix-only entries used for layout matching — not rendered as sidebar
// items or page headers.
// ── Prefix-only entries (layout matching, not sidebar items) ──────
DOCUMENTS: {
path: "/admin/documents",
icon: SvgEmpty,
title: "",
sidebarLabel: "",
requiredPermission: "admin",
section: "",
requiresEnterprise: false,
visibleWhen: null,
},
PERFORMANCE: {
path: "/admin/performance",
icon: SvgEmpty,
title: "",
sidebarLabel: "",
requiredPermission: "admin",
section: "",
requiresEnterprise: false,
visibleWhen: null,
},
} as const satisfies Record<string, AdminRouteEntry>;

View File

@@ -0,0 +1,75 @@
import { IconFunctionComponent } from "@opal/types";
import { SvgArrowUpCircle } from "@opal/icons";
import {
ADMIN_ROUTES,
AdminRouteEntry,
FeatureFlags,
sidebarItem,
} from "@/lib/admin-routes";
import { hasPermission } from "@/lib/permissions";
import { CombinedSettings } from "@/interfaces/settings";
export type { FeatureFlags } from "@/lib/admin-routes";
export interface SidebarItemEntry {
section: string;
name: string;
icon: IconFunctionComponent;
link: string;
error?: boolean;
disabled?: boolean;
}
export function buildItems(
permissions: string[],
flags: FeatureFlags,
settings: CombinedSettings | null
): SidebarItemEntry[] {
const can = (perm: string) => hasPermission(permissions, perm);
const items: SidebarItemEntry[] = [];
for (const route of Object.values(ADMIN_ROUTES) as AdminRouteEntry[]) {
if (!route.sidebarLabel) continue;
if (!can(route.requiredPermission)) continue;
if (route.visibleWhen && !route.visibleWhen(flags)) continue;
const item: SidebarItemEntry = {
...sidebarItem(route),
section: route.section,
disabled: route.requiresEnterprise && !flags.enableEnterprise,
};
// Special case: INDEX_SETTINGS shows reindexing error indicator
if (route.path === ADMIN_ROUTES.INDEX_SETTINGS.path) {
item.error = settings?.settings.needs_reindexing;
}
items.push(item);
}
// Upgrade Plan — only for full admins without a subscription
if (can("admin") && !flags.hasSubscription) {
items.push({
section: "",
name: "Upgrade Plan",
icon: SvgArrowUpCircle,
link: ADMIN_ROUTES.BILLING.path,
});
}
return items;
}
/** Preserve section ordering while grouping consecutive items by section. */
export function groupBySection(items: SidebarItemEntry[]) {
const groups: { section: string; items: SidebarItemEntry[] }[] = [];
for (const item of items) {
const last = groups[groups.length - 1];
if (last && last.section === item.section) {
last.items.push(item);
} else {
groups.push({ section: item.section, items: [item] });
}
}
return groups;
}

View File

@@ -5,6 +5,7 @@ import {
getCurrentUserSS,
} from "@/lib/userSS";
import { AuthType } from "@/lib/constants";
import { hasAnyAdminPermission } from "@/lib/permissions";
/**
* Result of an authentication check.
@@ -71,13 +72,6 @@ export async function requireAuth(): Promise<AuthCheckResult> {
};
}
// Allowlist of roles that can access admin pages (all roles except BASIC)
const ADMIN_ALLOWED_ROLES = [
UserRole.ADMIN,
UserRole.CURATOR,
UserRole.GLOBAL_CURATOR,
];
/**
* Requires that the user is authenticated AND has admin role.
* If not authenticated, redirects to login.
@@ -106,8 +100,12 @@ export async function requireAdminAuth(): Promise<AuthCheckResult> {
const { user, authTypeMetadata } = authResult;
// Check if user has an allowed role
if (user && !ADMIN_ALLOWED_ROLES.includes(user.role)) {
// Check if user has admin role or any admin permission via groups
if (
user &&
user.role !== UserRole.ADMIN &&
!hasAnyAdminPermission(user.effective_permissions ?? [])
) {
return {
user,
authTypeMetadata,

View File

@@ -32,7 +32,7 @@ import {
PersonaLabel,
} from "@/app/admin/agents/interfaces";
import { DefaultModel, LLMProviderDescriptor } from "@/interfaces/llm";
import { isAnthropic } from "@/lib/llmConfig/svc";
import { isAnthropic } from "@/app/admin/configuration/llm/utils";
import { getSourceMetadataForSources } from "./sources";
import { AuthType, NEXT_PUBLIC_CLOUD_ENABLED } from "./constants";
import { useUser } from "@/providers/UserProvider";

View File

@@ -14,28 +14,8 @@ import {
SvgLitellm,
SvgLmStudio,
} from "@opal/icons";
import {
MicrosoftIconSVG,
MistralIcon,
MetaIcon,
DeepseekIcon,
QwenIcon,
ZAIIcon,
} from "@/components/icons/icons";
import { LLMProviderName } from "@/interfaces/llm";
export const AGGREGATOR_PROVIDERS = new Set([
LLMProviderName.BEDROCK,
"bedrock_converse",
LLMProviderName.OPENROUTER,
LLMProviderName.OLLAMA_CHAT,
LLMProviderName.LM_STUDIO,
LLMProviderName.LITELLM_PROXY,
LLMProviderName.BIFROST,
LLMProviderName.OPENAI_COMPATIBLE,
LLMProviderName.VERTEX_AI,
]);
const PROVIDER_ICONS: Record<string, IconFunctionComponent> = {
[LLMProviderName.OPENAI]: SvgOpenai,
[LLMProviderName.ANTHROPIC]: SvgClaude,
@@ -101,80 +81,3 @@ export function getProviderDisplayName(providerName: string): string {
export function getProviderIcon(providerName: string): IconFunctionComponent {
return PROVIDER_ICONS[providerName] ?? SvgCpu;
}
// ---------------------------------------------------------------------------
// Model-aware icon resolver (legacy icon set)
// ---------------------------------------------------------------------------
const MODEL_ICON_MAP: Record<string, IconFunctionComponent> = {
[LLMProviderName.OPENAI]: SvgOpenai,
[LLMProviderName.ANTHROPIC]: SvgClaude,
[LLMProviderName.OLLAMA_CHAT]: SvgOllama,
[LLMProviderName.LM_STUDIO]: SvgLmStudio,
[LLMProviderName.OPENROUTER]: SvgOpenrouter,
[LLMProviderName.VERTEX_AI]: SvgGemini,
[LLMProviderName.BEDROCK]: SvgAws,
[LLMProviderName.LITELLM_PROXY]: SvgLitellm,
[LLMProviderName.BIFROST]: SvgBifrost,
[LLMProviderName.OPENAI_COMPATIBLE]: SvgPlug,
amazon: SvgAws,
phi: MicrosoftIconSVG,
mistral: MistralIcon,
ministral: MistralIcon,
llama: MetaIcon,
ollama: SvgOllama,
gemini: SvgGemini,
deepseek: DeepseekIcon,
claude: SvgClaude,
azure: SvgAzure,
microsoft: MicrosoftIconSVG,
meta: MetaIcon,
google: SvgGemini,
qwen: QwenIcon,
qwq: QwenIcon,
zai: ZAIIcon,
bedrock_converse: SvgAws,
};
/**
* Model-aware icon resolver that checks both provider name and model name
* to pick the most specific icon (e.g. Claude icon for a Bedrock Claude model).
*/
export const getModelIcon = (
providerName: string,
modelName?: string
): IconFunctionComponent => {
const lowerProviderName = providerName.toLowerCase();
// For aggregator providers, prioritise showing the vendor icon based on model name
if (AGGREGATOR_PROVIDERS.has(lowerProviderName) && modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Check if provider name directly matches an icon
if (lowerProviderName in MODEL_ICON_MAP) {
const icon = MODEL_ICON_MAP[lowerProviderName];
if (icon) {
return icon;
}
}
// For non-aggregator providers, check if model name contains any of the keys
if (modelName) {
const lowerModelName = modelName.toLowerCase();
for (const [key, icon] of Object.entries(MODEL_ICON_MAP)) {
if (lowerModelName.includes(key)) {
return icon;
}
}
}
// Fallback to CPU icon if no matches
return SvgCpu;
};

View File

@@ -1,5 +1,5 @@
/**
* LLM action functions for mutations and model fetching.
* LLM action functions for mutations.
*
* These are async functions for one-off actions that don't need SWR caching.
*
@@ -7,31 +7,12 @@
* - /api/admin/llm/test/default - Test the default LLM provider connection
* - /api/admin/llm/default - Set the default LLM model
* - /api/admin/llm/provider/{id} - Delete an LLM provider
* - /api/admin/llm/{provider}/available-models - Fetch available models for a provider
*/
import {
LLM_ADMIN_URL,
LLM_PROVIDERS_ADMIN_URL,
} from "@/lib/llmConfig/constants";
import {
OllamaModelResponse,
OpenRouterModelResponse,
BedrockModelResponse,
LMStudioModelResponse,
LiteLLMProxyModelResponse,
BifrostModelResponse,
ModelConfiguration,
LLMProviderName,
BedrockFetchParams,
OllamaFetchParams,
LMStudioFetchParams,
OpenRouterFetchParams,
LiteLLMProxyFetchParams,
BifrostFetchParams,
OpenAICompatibleFetchParams,
OpenAICompatibleModelResponse,
} from "@/interfaces/llm";
/**
* Test the default LLM provider.
@@ -76,522 +57,15 @@ export async function setDefaultLlmModel(
/**
* Delete an LLM provider.
* @param providerId - The provider ID to delete
* @param force - Force delete even if this is the default provider
* @throws Error with the detail message from the API on failure
*/
export async function deleteLlmProvider(
providerId: number,
force = false
): Promise<void> {
const url = force
? `${LLM_PROVIDERS_ADMIN_URL}/${providerId}?force=true`
: `${LLM_PROVIDERS_ADMIN_URL}/${providerId}`;
const response = await fetch(url, { method: "DELETE" });
export async function deleteLlmProvider(providerId: number): Promise<void> {
const response = await fetch(`${LLM_PROVIDERS_ADMIN_URL}/${providerId}`, {
method: "DELETE",
});
if (!response.ok) {
const errorMsg = (await response.json()).detail;
throw new Error(errorMsg);
}
}
// ---------------------------------------------------------------------------
// Aggregator providers & helpers
// ---------------------------------------------------------------------------
/** Aggregator providers that host models from multiple vendors. */
export const AGGREGATOR_PROVIDERS = new Set([
"bedrock",
"bedrock_converse",
"openrouter",
"ollama_chat",
"lm_studio",
"litellm_proxy",
"bifrost",
"openai_compatible",
"vertex_ai",
]);
export const isAnthropic = (provider: string, modelName?: string) =>
provider === LLMProviderName.ANTHROPIC ||
!!modelName?.toLowerCase().includes("claude");
// ---------------------------------------------------------------------------
// Model fetching
// ---------------------------------------------------------------------------
/**
* Fetches Bedrock models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchBedrockModels = async (
params: BedrockFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
if (!params.aws_region_name) {
return { models: [], error: "AWS region is required" };
}
try {
const response = await fetch("/api/admin/llm/bedrock/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
aws_region_name: params.aws_region_name,
aws_access_key_id: params.aws_access_key_id,
aws_secret_access_key: params.aws_secret_access_key,
aws_bearer_token_bedrock: params.aws_bearer_token_bedrock,
provider_name: params.provider_name,
}),
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: BedrockModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: false,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches Ollama models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchOllamaModels = async (
params: OllamaFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/ollama/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: OllamaModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches OpenRouter models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchOpenRouterModels = async (
params: OpenRouterFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
const apiKey = params.api_key;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
if (!apiKey) {
return { models: [], error: "API Key is required" };
}
try {
const response = await fetch("/api/admin/llm/openrouter/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: apiKey,
provider_name: params.provider_name,
}),
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse OpenRouter model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: OpenRouterModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: false,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches LM Studio models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchLMStudioModels = async (
params: LMStudioFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/lm-studio/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
api_key_changed: params.api_key_changed ?? false,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse LM Studio model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: LMStudioModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches Bifrost models directly without any form state dependencies.
* Uses snake_case params to match API structure.
*/
export const fetchBifrostModels = async (
params: BifrostFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch("/api/admin/llm/bifrost/available-models", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
provider_name: params.provider_name,
}),
signal: params.signal,
});
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch (jsonError) {
console.warn(
"Failed to parse Bifrost model fetch error response",
jsonError
);
}
return { models: [], error: errorMessage };
}
const data: BifrostModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
* Fetches models from a generic OpenAI-compatible server.
* Uses snake_case params to match API structure.
*/
export const fetchOpenAICompatibleModels = async (
params: OpenAICompatibleFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
const apiBase = params.api_base;
if (!apiBase) {
return { models: [], error: "API Base is required" };
}
try {
const response = await fetch(
"/api/admin/llm/openai-compatible/available-models",
{
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
api_base: apiBase,
api_key: params.api_key,
provider_name: params.provider_name,
}),
signal: params.signal,
}
);
if (!response.ok) {
let errorMessage = "Failed to fetch models";
try {
const errorData = await response.json();
errorMessage = errorData.detail || errorData.message || errorMessage;
} catch {
// ignore JSON parsing errors
}
return { models: [], error: errorMessage };
}
const data: OpenAICompatibleModelResponse[] = await response.json();
const models: ModelConfiguration[] = data.map((modelData) => ({
name: modelData.name,
display_name: modelData.display_name,
is_visible: true,
max_input_tokens: modelData.max_input_tokens,
supports_image_input: modelData.supports_image_input,
supports_reasoning: modelData.supports_reasoning,
}));
return { models };
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return { models: [], error: errorMessage };
}
};
/**
 * Fetches the model list from a LiteLLM Proxy deployment.
 * Takes snake_case params mirroring the backend request payload; no form
 * state is read here.
 */
export const fetchLiteLLMProxyModels = async (
  params: LiteLLMProxyFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  const { api_base: apiBase, api_key: apiKey } = params;
  if (!apiBase) {
    return { models: [], error: "API Base is required" };
  }
  if (!apiKey) {
    return { models: [], error: "API Key is required" };
  }

  try {
    const response = await fetch("/api/admin/llm/litellm/available-models", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        api_base: apiBase,
        api_key: apiKey,
        provider_name: params.provider_name,
      }),
      signal: params.signal,
    });

    if (!response.ok) {
      // Prefer the backend-provided message when the body is parseable JSON.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch {
        // ignore JSON parsing errors
      }
      return { models: [], error: errorMessage };
    }

    const data: LiteLLMProxyModelResponse[] = await response.json();
    // LiteLLM only reports model names — default the remaining metadata.
    return {
      models: data.map((modelData) => ({
        name: modelData.model_name,
        display_name: modelData.model_name,
        is_visible: true,
        max_input_tokens: null,
        supports_image_input: false,
        supports_reasoning: false,
      })),
    };
  } catch (error) {
    return {
      models: [],
      error: error instanceof Error ? error.message : "Unknown error",
    };
  }
};
/**
 * Dispatches a model fetch to the correct provider-specific fetcher,
 * mapping raw form values onto each fetcher's snake_case params.
 */
export const fetchModels = async (
  providerName: string,
  formValues: {
    api_base?: string;
    api_key?: string;
    api_key_changed?: boolean;
    name?: string;
    custom_config?: Record<string, string>;
    model_configurations?: ModelConfiguration[];
  },
  signal?: AbortSignal
) => {
  const customConfig = formValues.custom_config || {};

  if (providerName === LLMProviderName.BEDROCK) {
    // Bedrock authenticates via AWS credentials held in custom_config.
    return fetchBedrockModels({
      aws_region_name: customConfig.AWS_REGION_NAME || "",
      aws_access_key_id: customConfig.AWS_ACCESS_KEY_ID,
      aws_secret_access_key: customConfig.AWS_SECRET_ACCESS_KEY,
      aws_bearer_token_bedrock: customConfig.AWS_BEARER_TOKEN_BEDROCK,
      provider_name: formValues.name,
    });
  }
  if (providerName === LLMProviderName.OLLAMA_CHAT) {
    return fetchOllamaModels({
      api_base: formValues.api_base,
      provider_name: formValues.name,
      signal,
    });
  }
  if (providerName === LLMProviderName.LM_STUDIO) {
    // LM Studio keeps its API key in custom_config rather than api_key.
    return fetchLMStudioModels({
      api_base: formValues.api_base,
      api_key: formValues.custom_config?.LM_STUDIO_API_KEY,
      api_key_changed: formValues.api_key_changed ?? false,
      provider_name: formValues.name,
      signal,
    });
  }
  if (providerName === LLMProviderName.OPENROUTER) {
    return fetchOpenRouterModels({
      api_base: formValues.api_base,
      api_key: formValues.api_key,
      provider_name: formValues.name,
    });
  }
  if (providerName === LLMProviderName.LITELLM_PROXY) {
    return fetchLiteLLMProxyModels({
      api_base: formValues.api_base,
      api_key: formValues.api_key,
      provider_name: formValues.name,
      signal,
    });
  }
  if (providerName === LLMProviderName.BIFROST) {
    return fetchBifrostModels({
      api_base: formValues.api_base,
      api_key: formValues.api_key,
      provider_name: formValues.name,
      signal,
    });
  }
  if (providerName === LLMProviderName.OPENAI_COMPATIBLE) {
    return fetchOpenAICompatibleModels({
      api_base: formValues.api_base,
      api_key: formValues.api_key,
      provider_name: formValues.name,
      signal,
    });
  }

  return { models: [], error: `Unknown provider: ${providerName}` };
};

View File

@@ -0,0 +1,38 @@
import { LLMProviderResponse, VisionProvider } from "@/interfaces/llm";
import { LLM_ADMIN_URL } from "@/lib/llmConfig/constants";
/** Retrieves all LLM providers that can serve as vision providers. */
export async function fetchVisionProviders(): Promise<VisionProvider[]> {
  const res = await fetch(`${LLM_ADMIN_URL}/vision-providers`, {
    headers: { "Content-Type": "application/json" },
  });
  if (!res.ok) {
    const body = await res.text();
    throw new Error(`Failed to fetch vision providers: ${body}`);
  }
  const payload = (await res.json()) as LLMProviderResponse<VisionProvider>;
  return payload.providers;
}
/** Marks the given provider/model pair as the default vision provider. */
export async function setDefaultVisionProvider(
  providerId: number,
  visionModel: string
): Promise<void> {
  const payload = {
    provider_id: providerId,
    model_name: visionModel,
  };
  const response = await fetch(`${LLM_ADMIN_URL}/default-vision`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(payload),
  });
  if (response.ok) return;
  // Propagate the raw backend error text to the caller.
  throw new Error(await response.text());
}

View File

@@ -0,0 +1,36 @@
import { ADMIN_ROUTES } from "@/lib/admin-routes";
// Derived from ADMIN_ROUTES — no hardcoded list to maintain.
// "admin" is the full-access override token, not a regular permission.
const ADMIN_ROUTE_PERMISSIONS: Set<string> = new Set(
  Object.values(ADMIN_ROUTES)
    .filter((route) => route.requiredPermission !== "admin")
    .map((route) => route.requiredPermission)
);
/** True when the user holds the "admin" override or any admin-route permission. */
export function hasAnyAdminPermission(permissions: string[]): boolean {
  return (
    permissions.includes("admin") ||
    permissions.some((perm) => ADMIN_ROUTE_PERMISSIONS.has(perm))
  );
}
/**
 * True when the user holds the "admin" override or at least one of the
 * `required` permissions. With no `required` args, only "admin" passes.
 */
export function hasPermission(
  permissions: string[],
  ...required: string[]
): boolean {
  const granted = new Set(permissions);
  if (granted.has("admin")) return true;
  for (const perm of required) {
    if (granted.has(perm)) return true;
  }
  return false;
}
/**
 * Returns the path of the first sidebar-visible admin route the user may
 * access, iterating ADMIN_ROUTES in declaration order for a stable result.
 * Falls back to the Agents page.
 */
export function getFirstPermittedAdminRoute(permissions: string[]): string {
  // "admin" grants everything — hoisted out of the loop since it is invariant.
  const isFullAdmin = permissions.includes("admin");
  for (const route of Object.values(ADMIN_ROUTES)) {
    // Routes without a sidebar label are not user-navigable landing pages.
    if (!route.sidebarLabel) continue;
    if (isFullAdmin || permissions.includes(route.requiredPermission)) {
      return route.path;
    }
  }
  // Fallback — should not be reached if hasAdminAccess is checked first
  return ADMIN_ROUTES.AGENTS.path;
}

View File

@@ -94,6 +94,9 @@ export const SWR_KEYS = {
// ── Groups ────────────────────────────────────────────────────────────────
adminUserGroups: "/api/manage/admin/user-group",
shareableGroups: "/api/manage/user-groups/minimal",
userGroupPermissions: (groupId: number) =>
`/api/manage/admin/user-group/${groupId}/permissions`,
permissionRegistry: "/api/manage/admin/permissions/registry",
scimToken: "/api/admin/enterprise-settings/scim/token",
// ── MCP Servers ───────────────────────────────────────────────────────────

View File

@@ -126,6 +126,7 @@ export interface User {
password_configured?: boolean;
tenant_info?: TenantInfo | null;
personalization?: UserPersonalization;
effective_permissions?: string[];
}
export interface TenantInfo {

View File

@@ -15,6 +15,7 @@ import {
UserRole,
ThemePreference,
} from "@/lib/types";
import { hasAnyAdminPermission } from "@/lib/permissions";
import { usePostHog } from "posthog-js/react";
import { SettingsContext } from "@/providers/SettingsProvider";
import { useTokenRefresh } from "@/hooks/useTokenRefresh";
@@ -26,10 +27,14 @@ import {
import { updateUserPersonalization as persistPersonalization } from "@/lib/userSettings";
import { useTheme } from "next-themes";
const EMPTY_PERMISSIONS: string[] = [];
interface UserContextType {
user: User | null;
isAdmin: boolean;
isCurator: boolean;
hasAdminAccess: boolean;
permissions: string[];
refreshUser: () => Promise<void>;
isCloudSuperuser: boolean;
authTypeMetadata: AuthTypeMetadata;
@@ -523,6 +528,10 @@ export function UserProvider({ children }: { children: React.ReactNode }) {
isCurator:
upToDateUser?.role === UserRole.CURATOR ||
upToDateUser?.role === UserRole.GLOBAL_CURATOR,
hasAdminAccess: hasAnyAdminPermission(
upToDateUser?.effective_permissions ?? EMPTY_PERMISSIONS
),
permissions: upToDateUser?.effective_permissions ?? EMPTY_PERMISSIONS,
isCloudSuperuser: upToDateUser?.is_cloud_superuser ?? false,
}}
>

View File

@@ -1,7 +1,7 @@
"use client";
import React from "react";
import type { IconProps, RichStr } from "@opal/types";
import type { IconProps } from "@opal/types";
import Text from "@/refresh-components/texts/Text";
import { Button } from "@opal/components";
import Modal from "@/refresh-components/Modal";
@@ -9,8 +9,8 @@ import { useModalClose } from "../contexts/ModalContext";
export interface ConfirmationModalProps {
icon: React.FunctionComponent<IconProps>;
title: string | RichStr;
description?: string | RichStr;
title: string;
description?: string;
children?: React.ReactNode;
submit: React.ReactNode;

View File

@@ -4,9 +4,11 @@ import { useState, useEffect, useCallback, useMemo, useRef } from "react";
import Popover from "@/refresh-components/Popover";
import { LlmDescriptor, LlmManager } from "@/lib/hooks";
import { structureValue } from "@/lib/llmConfig/utils";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { AGGREGATOR_PROVIDERS } from "@/lib/llmConfig/svc";
import {
getProviderIcon,
AGGREGATOR_PROVIDERS,
} from "@/app/admin/configuration/llm/utils";
import { LLMProviderDescriptor } from "@/interfaces/llm";
import { Slider } from "@/components/ui/slider";
import { useUser } from "@/providers/UserProvider";
import Text from "@/refresh-components/texts/Text";
@@ -53,7 +55,7 @@ export function groupLlmOptions(
groups.set(groupKey, {
displayName,
options: [],
Icon: getModelIcon(provider),
Icon: getProviderIcon(provider),
});
}
@@ -191,7 +193,7 @@ export default function LLMPopover({
icon={
foldable
? SvgRefreshCw
: getModelIcon(
: getProviderIcon(
llmManager.currentLlm.provider,
llmManager.currentLlm.modelName
)

View File

@@ -3,7 +3,7 @@
import { useState, useMemo, useRef } from "react";
import Popover from "@/refresh-components/Popover";
import { LlmManager } from "@/lib/hooks";
import { getModelIcon } from "@/lib/llmConfig/providers";
import { getProviderIcon } from "@/app/admin/configuration/llm/utils";
import { Button, SelectButton, OpenButton } from "@opal/components";
import { SvgPlusCircle, SvgX } from "@opal/icons";
import { LLMOption } from "@/refresh-components/popovers/interfaces";
@@ -152,7 +152,7 @@ export default function ModelSelector({
)}
<div className="flex items-center shrink-0">
{selectedModels.map((model, index) => {
const ProviderIcon = getModelIcon(
const ProviderIcon = getProviderIcon(
model.provider,
model.modelName
);

View File

@@ -19,9 +19,11 @@ import {
updateAgentGroupSharing,
updateDocSetGroupSharing,
saveTokenLimits,
saveGroupPermissions,
} from "./svc";
import { memberTableColumns, PAGE_SIZE } from "./shared";
import SharedGroupResources from "@/refresh-pages/admin/GroupsPage/SharedGroupResources";
import GroupPermissionsSection from "./GroupPermissionsSection";
import TokenLimitSection from "./TokenLimitSection";
import type { TokenLimit } from "./TokenLimitSection";
@@ -34,6 +36,9 @@ function CreateGroupPage() {
const [selectedCcPairIds, setSelectedCcPairIds] = useState<number[]>([]);
const [selectedDocSetIds, setSelectedDocSetIds] = useState<number[]>([]);
const [selectedAgentIds, setSelectedAgentIds] = useState<number[]>([]);
const [enabledPermissions, setEnabledPermissions] = useState<Set<string>>(
new Set()
);
const [tokenLimits, setTokenLimits] = useState<TokenLimit[]>([
{ tokenBudget: null, periodHours: null },
]);
@@ -54,6 +59,7 @@ function CreateGroupPage() {
selectedUserIds,
selectedCcPairIds
);
await saveGroupPermissions(groupId, enabledPermissions);
await updateAgentGroupSharing(groupId, [], selectedAgentIds);
await updateDocSetGroupSharing(groupId, [], selectedDocSetIds);
await saveTokenLimits(groupId, tokenLimits, []);
@@ -153,6 +159,11 @@ function CreateGroupPage() {
/>
</Section>
)}
<GroupPermissionsSection
enabledPermissions={enabledPermissions}
onPermissionsChange={setEnabledPermissions}
/>
<SharedGroupResources
selectedCcPairIds={selectedCcPairIds}
onCcPairIdsChange={setSelectedCcPairIds}

View File

@@ -30,9 +30,11 @@ import {
updateAgentGroupSharing,
updateDocSetGroupSharing,
saveTokenLimits,
saveGroupPermissions,
} from "./svc";
import { SWR_KEYS } from "@/lib/swr-keys";
import SharedGroupResources from "@/refresh-pages/admin/GroupsPage/SharedGroupResources";
import GroupPermissionsSection from "./GroupPermissionsSection";
import TokenLimitSection from "./TokenLimitSection";
import type { TokenLimit } from "./TokenLimitSection";
@@ -75,6 +77,11 @@ function EditGroupPage({ groupId }: EditGroupPageProps) {
TokenRateLimitDisplay[]
>(SWR_KEYS.userGroupTokenRateLimit(groupId), errorHandlingFetcher);
// Fetch permissions for this group
const { data: groupPermissions, isLoading: permissionsLoading } = useSWR<
string[]
>(SWR_KEYS.userGroupPermissions(groupId), errorHandlingFetcher);
// Form state
const [groupName, setGroupName] = useState("");
const [selectedUserIds, setSelectedUserIds] = useState<string[]>([]);
@@ -87,6 +94,9 @@ function EditGroupPage({ groupId }: EditGroupPageProps) {
const [tokenLimits, setTokenLimits] = useState<TokenLimit[]>([
{ tokenBudget: null, periodHours: null },
]);
const [enabledPermissions, setEnabledPermissions] = useState<Set<string>>(
new Set()
);
const [showDeleteModal, setShowDeleteModal] = useState(false);
const [isDeleting, setIsDeleting] = useState(false);
const [initialized, setInitialized] = useState(false);
@@ -101,7 +111,11 @@ function EditGroupPage({ groupId }: EditGroupPageProps) {
error: candidatesError,
} = useGroupMemberCandidates();
const isLoading = groupLoading || candidatesLoading || tokenLimitsLoading;
const isLoading =
groupLoading ||
candidatesLoading ||
tokenLimitsLoading ||
permissionsLoading;
const error = groupError ?? candidatesError;
// Pre-populate form when group data loads
@@ -132,6 +146,13 @@ function EditGroupPage({ groupId }: EditGroupPageProps) {
}
}, [tokenRateLimits]);
// Pre-populate permissions when fetched
useEffect(() => {
if (groupPermissions) {
setEnabledPermissions(new Set(groupPermissions));
}
}, [groupPermissions]);
const memberRows = useMemo(() => {
const selected = new Set(selectedUserIds);
return allRows.filter((r) => selected.has(r.id ?? r.email));
@@ -233,6 +254,9 @@ function EditGroupPage({ groupId }: EditGroupPageProps) {
selectedDocSetIds
);
// Save permissions (bulk desired-state)
await saveGroupPermissions(groupId, enabledPermissions);
// Save token rate limits (create/update/delete)
await saveTokenLimits(groupId, tokenLimits, tokenRateLimits ?? []);
@@ -242,6 +266,7 @@ function EditGroupPage({ groupId }: EditGroupPageProps) {
mutate(SWR_KEYS.adminUserGroups);
mutate(SWR_KEYS.userGroupTokenRateLimit(groupId));
mutate(SWR_KEYS.userGroupPermissions(groupId));
toast.success(`Group "${trimmed}" updated`);
router.push("/admin/groups");
} catch (e) {
@@ -431,6 +456,11 @@ function EditGroupPage({ groupId }: EditGroupPageProps) {
)}
</Section>
<GroupPermissionsSection
enabledPermissions={enabledPermissions}
onPermissionsChange={setEnabledPermissions}
/>
<SharedGroupResources
selectedCcPairIds={selectedCcPairIds}
onCcPairIdsChange={setSelectedCcPairIds}

View File

@@ -0,0 +1,133 @@
"use client";
import { Fragment } from "react";
import useSWR from "swr";
import { ContentAction } from "@opal/layouts";
import {
SvgSettings,
SvgPlug,
SvgActions,
SvgUsers,
SvgUserKey,
SvgSlack,
SvgPlusCircle,
SvgUserManage,
SvgBarChart,
SvgHistory,
SvgKey,
SvgShield,
SvgCpu,
SvgFiles,
SvgCreateAgent,
SvgManageAgent,
} from "@opal/icons";
import type { IconFunctionComponent } from "@opal/types";
import Card from "@/refresh-components/cards/Card";
import Switch from "@/refresh-components/inputs/Switch";
import Separator from "@/refresh-components/Separator";
import SimpleCollapsible from "@/refresh-components/SimpleCollapsible";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import { errorHandlingFetcher } from "@/lib/fetcher";
import { SWR_KEYS } from "@/lib/swr-keys";
import type { PermissionRegistryEntry } from "@/refresh-pages/admin/GroupsPage/interfaces";
// ---------------------------------------------------------------------------
// Icon mapping — the only permission metadata maintained in the frontend.
// The `id` keys must match the backend PERMISSION_REGISTRY entries.
// Permissions missing here render with the generic SvgShield fallback
// (see the `ICON_MAP[entry.id] ?? SvgShield` lookup in the component).
// ---------------------------------------------------------------------------
const ICON_MAP: Record<string, IconFunctionComponent> = {
  manage_llms: SvgCpu,
  manage_connectors_and_document_sets: SvgFiles,
  manage_actions: SvgActions,
  manage_groups: SvgUsers,
  manage_service_accounts: SvgUserKey,
  manage_slack_discord_bots: SvgSlack,
  create_agents: SvgCreateAgent,
  manage_agents: SvgManageAgent,
  view_agent_analytics: SvgBarChart,
  view_query_history: SvgHistory,
  create_user_access_token: SvgKey,
};
// ---------------------------------------------------------------------------
// Component
// ---------------------------------------------------------------------------
interface GroupPermissionsSectionProps {
enabledPermissions: Set<string>;
onPermissionsChange: (permissions: Set<string>) => void;
}
function GroupPermissionsSection({
enabledPermissions,
onPermissionsChange,
}: GroupPermissionsSectionProps) {
const { data: registry, isLoading } = useSWR<PermissionRegistryEntry[]>(
SWR_KEYS.permissionRegistry,
errorHandlingFetcher
);
function isRowEnabled(entry: PermissionRegistryEntry): boolean {
return entry.permissions.every((p) => enabledPermissions.has(p));
}
function handleToggle(entry: PermissionRegistryEntry, checked: boolean) {
const next = new Set(enabledPermissions);
for (const perm of entry.permissions) {
if (checked) {
next.add(perm);
} else {
next.delete(perm);
}
}
onPermissionsChange(next);
}
return (
<SimpleCollapsible>
<SimpleCollapsible.Header
title="Group Permissions"
description="Set access and permissions for members of this group."
/>
<SimpleCollapsible.Content>
{isLoading || !registry ? (
<SimpleLoader />
) : (
<Card>
{registry.map((entry, index) => {
const prevGroup =
index > 0 ? registry[index - 1]!.group : entry.group;
const icon = ICON_MAP[entry.id] ?? SvgShield;
return (
<Fragment key={entry.id}>
{index > 0 && entry.group !== prevGroup && (
<Separator noPadding />
)}
<ContentAction
icon={icon}
title={entry.display_name}
description={entry.description}
sizePreset="main-ui"
variant="section"
paddingVariant="md"
rightChildren={
<Switch
checked={isRowEnabled(entry)}
onCheckedChange={(checked) =>
handleToggle(entry, checked)
}
/>
}
/>
</Fragment>
);
})}
</Card>
)}
</SimpleCollapsible.Content>
</SimpleCollapsible>
);
}
export default GroupPermissionsSection;

View File

@@ -20,3 +20,12 @@ export interface TokenRateLimitDisplay {
token_budget: number;
period_hours: number;
}
/** Mirrors backend PermissionRegistryEntry from onyx.auth.permissions. */
export interface PermissionRegistryEntry {
  // Stable identifier; must match the backend registry key (also used to
  // pick a frontend icon in GroupPermissionsSection).
  id: string;
  // Human-readable name rendered as the row title in the admin UI.
  display_name: string;
  // Short description rendered beneath the display name.
  description: string;
  // Individual permission strings this entry bundles; toggling the entry
  // enables/disables all of them together.
  permissions: string[];
  // Numeric grouping key — consecutive entries with different `group`
  // values get a visual separator. Presumably assigned by the backend
  // registry order; verify semantics against onyx.auth.permissions.
  group: number;
}

View File

@@ -281,6 +281,27 @@ async function saveTokenLimits(
}
}
// ---------------------------------------------------------------------------
// Group permissions — bulk set desired permissions in a single request
// ---------------------------------------------------------------------------

/** PUTs the full desired permission set for a group in one request. */
async function saveGroupPermissions(
  groupId: number,
  enabledPermissions: Set<string>
): Promise<void> {
  const response = await fetch(`${USER_GROUP_URL}/${groupId}/permissions`, {
    method: "PUT",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ permissions: Array.from(enabledPermissions) }),
  });
  if (response.ok) return;

  // Surface the backend's error detail when the body is parseable JSON.
  const detail = await response.json().catch(() => null);
  throw new Error(
    detail?.detail ?? `Failed to update permissions: ${response.statusText}`
  );
}
export {
renameGroup,
createGroup,
@@ -289,4 +310,5 @@ export {
updateAgentGroupSharing,
updateDocSetGroupSharing,
saveTokenLimits,
saveGroupPermissions,
};

View File

@@ -18,7 +18,7 @@ import {
unsetDefaultImageGenerationConfig,
deleteImageGenerationConfig,
} from "@/refresh-pages/admin/ImageGenerationPage/svc";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import Message from "@/refresh-components/messages/Message";
import ConfirmationModalLayout from "@/refresh-components/layouts/ConfirmationModalLayout";
import InputSelect from "@/refresh-components/inputs/InputSelect";
@@ -264,7 +264,7 @@ export default function ImageGenerationContent() {
sizePreset="main-ui"
variant="section"
icon={() => (
<ModelIcon
<ProviderIcon
provider={provider.provider_name}
size={16}
/>
@@ -391,7 +391,7 @@ export default function ImageGenerationContent() {
key={p.image_provider_id}
value={p.image_provider_id}
icon={() => (
<ModelIcon
<ProviderIcon
provider={p.provider_name}
size={16}
/>

View File

@@ -3,7 +3,7 @@
import React, { useState, useMemo, useEffect } from "react";
import { Form, Formik, FormikProps } from "formik";
import ProviderModal from "@/components/modals/ProviderModal";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import ConnectionProviderIcon from "@/refresh-components/ConnectionProviderIcon";
import {
testImageGenerationApiKey,
@@ -246,7 +246,7 @@ export function ImageGenFormWrapper<T extends FormValues>({
const icon = () => (
<ConnectionProviderIcon
icon={<ModelIcon provider={imageProvider.provider_name} size={24} />}
icon={<ProviderIcon provider={imageProvider.provider_name} size={24} />}
/>
);

View File

@@ -8,8 +8,8 @@ import {
useWellKnownLLMProviders,
} from "@/hooks/useLLMProviders";
import { ThreeDotsLoader } from "@/components/Loading";
import { Content, Card as CardLayout } from "@opal/layouts";
import { Button, SelectCard, Text, Card } from "@opal/components";
import { Content, Card } from "@opal/layouts";
import { Button, SelectCard } from "@opal/components";
import { Hoverable } from "@opal/core";
import { SvgArrowExchange, SvgSettings, SvgTrash } from "@opal/icons";
import * as SettingsLayouts from "@/layouts/settings-layouts";
@@ -22,14 +22,15 @@ import {
} from "@/lib/llmConfig/providers";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import { deleteLlmProvider, setDefaultLlmModel } from "@/lib/llmConfig/svc";
import Text from "@/refresh-components/texts/Text";
import { Horizontal as HorizontalInput } from "@/layouts/input-layouts";
import LegacyCard from "@/refresh-components/cards/Card";
import InputSelect from "@/refresh-components/inputs/InputSelect";
import Message from "@/refresh-components/messages/Message";
import ConfirmationModalLayout from "@/refresh-components/layouts/ConfirmationModalLayout";
import { useCreateModal } from "@/refresh-components/contexts/ModalContext";
import Separator from "@/refresh-components/Separator";
import {
LLMProviderName,
LLMProviderView,
WellKnownLLMProviderDescriptor,
} from "@/interfaces/llm";
@@ -42,12 +43,11 @@ import BedrockModal from "@/sections/modals/llmConfig/BedrockModal";
import VertexAIModal from "@/sections/modals/llmConfig/VertexAIModal";
import OpenRouterModal from "@/sections/modals/llmConfig/OpenRouterModal";
import CustomModal from "@/sections/modals/llmConfig/CustomModal";
import LMStudioModal from "@/sections/modals/llmConfig/LMStudioModal";
import LMStudioForm from "@/sections/modals/llmConfig/LMStudioForm";
import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
import BifrostModal from "@/sections/modals/llmConfig/BifrostModal";
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
import { Section } from "@/layouts/general-layouts";
import { markdown } from "@opal/utils";
const route = ADMIN_ROUTES.LLM_MODELS;
@@ -58,18 +58,17 @@ const route = ADMIN_ROUTES.LLM_MODELS;
// Client-side ordering for the "Add Provider" cards. The backend may return
// wellKnownLLMProviders in an arbitrary order, so we sort explicitly here.
const PROVIDER_DISPLAY_ORDER: string[] = [
LLMProviderName.OPENAI,
LLMProviderName.ANTHROPIC,
LLMProviderName.VERTEX_AI,
LLMProviderName.BEDROCK,
LLMProviderName.AZURE,
LLMProviderName.LITELLM,
LLMProviderName.LITELLM_PROXY,
LLMProviderName.OLLAMA_CHAT,
LLMProviderName.OPENROUTER,
LLMProviderName.LM_STUDIO,
LLMProviderName.BIFROST,
LLMProviderName.OPENAI_COMPATIBLE,
"openai",
"anthropic",
"vertex_ai",
"bedrock",
"azure",
"litellm_proxy",
"ollama_chat",
"openrouter",
"lm_studio",
"bifrost",
"openai_compatible",
];
const PROVIDER_MODAL_MAP: Record<
@@ -101,7 +100,7 @@ const PROVIDER_MODAL_MAP: Record<
<OpenRouterModal shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
),
lm_studio: (d, onOpenChange) => (
<LMStudioModal shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
<LMStudioForm shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
),
litellm_proxy: (d, onOpenChange) => (
<LiteLLMProxyModal shouldMarkAsDefault={d} onOpenChange={onOpenChange} />
@@ -140,7 +139,7 @@ function ExistingProviderCard({
const handleDelete = async () => {
try {
await deleteLlmProvider(provider.id, isLastProvider);
await deleteLlmProvider(provider.id);
await refreshLlmProviderCaches(mutate);
deleteModal.toggle(false);
toast.success("Provider deleted successfully!");
@@ -155,37 +154,24 @@ function ExistingProviderCard({
{deleteModal.isOpen && (
<ConfirmationModalLayout
icon={SvgTrash}
title={markdown(`Delete *${provider.name}*`)}
title={`Delete ${provider.name}`}
onClose={() => deleteModal.toggle(false)}
submit={
<Button
variant="danger"
onClick={handleDelete}
disabled={isDefault && !isLastProvider}
>
<Button variant="danger" onClick={handleDelete}>
Delete
</Button>
}
>
<Section alignItems="start" gap={0.5}>
{isDefault && !isLastProvider ? (
<Text font="main-ui-body" color="text-03">
Cannot delete the default provider. Select another provider as
the default prior to deleting this one.
<Text text03>
All LLM models from provider <b>{provider.name}</b> will be
removed and unavailable for future chats. Chat history will be
preserved.
</Text>
{isLastProvider && (
<Text text03>
Connect another provider to continue using chats.
</Text>
) : (
<>
<Text font="main-ui-body" color="text-03">
{markdown(
`All LLM models from provider **${provider.name}** will be removed and unavailable for future chats. Chat history will be preserved.`
)}
</Text>
{isLastProvider && (
<Text font="main-ui-body" color="text-03">
Connect another provider to continue using chats.
</Text>
)}
</>
)}
</Section>
</ConfirmationModalLayout>
@@ -201,7 +187,7 @@ function ExistingProviderCard({
rounding="lg"
onClick={() => setIsOpen(true)}
>
<CardLayout.Header
<Card.Header
icon={getProviderIcon(provider.provider)}
title={provider.name}
description={getProviderDisplayName(provider.provider)}
@@ -271,7 +257,7 @@ function NewProviderCard({
rounding="lg"
onClick={() => setIsOpen(true)}
>
<CardLayout.Header
<Card.Header
icon={getProviderIcon(provider.name)}
title={getProviderProductName(provider.name)}
description={getProviderDisplayName(provider.name)}
@@ -315,7 +301,7 @@ function NewCustomProviderCard({
rounding="lg"
onClick={() => setIsOpen(true)}
>
<CardLayout.Header
<Card.Header
icon={getProviderIcon("custom")}
title={getProviderProductName("custom")}
description={getProviderDisplayName("custom")}
@@ -348,7 +334,7 @@ function NewCustomProviderCard({
// LLMConfigurationPage — main page component
// ============================================================================
export default function LLMProviderConfigurationPage() {
export default function LLMConfigurationPage() {
const { mutate } = useSWRConfig();
const { llmProviders: existingLlmProviders, defaultText } =
useAdminLLMProviders();
@@ -404,7 +390,7 @@ export default function LLMProviderConfigurationPage() {
<SettingsLayouts.Body>
{hasProviders ? (
<Card border="solid" rounding="lg">
<LegacyCard>
<HorizontalInput
title="Default Model"
description="This model will be used by Onyx by default in your chats."
@@ -435,7 +421,7 @@ export default function LLMProviderConfigurationPage() {
</InputSelect.Content>
</InputSelect>
</HorizontalInput>
</Card>
</LegacyCard>
) : (
<Message
info

View File

@@ -1,99 +1,174 @@
"use client";
import { useState } from "react";
import { useSWRConfig } from "swr";
import { LLMProviderFormProps, LLMProviderName } from "@/interfaces/llm";
import { Formik } from "formik";
import { LLMProviderFormProps } from "@/interfaces/llm";
import * as Yup from "yup";
import { useWellKnownLLMProvider } from "@/hooks/useLLMProviders";
import {
useInitialValues,
buildValidationSchema,
buildDefaultInitialValues,
buildDefaultValidationSchema,
buildAvailableModelConfigurations,
buildOnboardingInitialValues,
} from "@/sections/modals/llmConfig/utils";
import { submitProvider } from "@/sections/modals/llmConfig/svc";
import { LLMProviderConfiguredSource } from "@/lib/analytics";
import {
submitLLMProvider,
submitOnboardingProvider,
} from "@/sections/modals/llmConfig/svc";
import {
APIKeyField,
ModelSelectionField,
ModelsField,
DisplayNameField,
ModelAccessField,
ModalWrapper,
ModelsAccessField,
FieldSeparator,
SingleDefaultModelField,
LLMConfigurationModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import * as InputLayouts from "@/layouts/input-layouts";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import { toast } from "@/hooks/useToast";
const ANTHROPIC_PROVIDER_NAME = "anthropic";
const DEFAULT_DEFAULT_MODEL_NAME = "claude-sonnet-4-5";
export default function AnthropicModal({
variant = "llm-configuration",
existingLlmProvider,
shouldMarkAsDefault,
onOpenChange,
onSuccess,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
}: LLMProviderFormProps) {
const isOnboarding = variant === "onboarding";
const [isTesting, setIsTesting] = useState(false);
const { mutate } = useSWRConfig();
const { wellKnownLLMProvider } = useWellKnownLLMProvider(
ANTHROPIC_PROVIDER_NAME
);
const onClose = () => onOpenChange?.(false);
const initialValues = useInitialValues(
isOnboarding,
LLMProviderName.ANTHROPIC,
existingLlmProvider
const modelConfigurations = buildAvailableModelConfigurations(
existingLlmProvider,
wellKnownLLMProvider ?? llmDescriptor
);
const validationSchema = buildValidationSchema(isOnboarding, {
apiKey: true,
});
const initialValues = isOnboarding
? {
...buildOnboardingInitialValues(),
name: ANTHROPIC_PROVIDER_NAME,
provider: ANTHROPIC_PROVIDER_NAME,
api_key: "",
default_model_name: DEFAULT_DEFAULT_MODEL_NAME,
}
: {
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_key: existingLlmProvider?.api_key ?? "",
api_base: existingLlmProvider?.api_base ?? undefined,
default_model_name:
(defaultModelName &&
modelConfigurations.some((m) => m.name === defaultModelName)
? defaultModelName
: undefined) ??
wellKnownLLMProvider?.recommended_default_model?.name ??
DEFAULT_DEFAULT_MODEL_NAME,
is_auto_mode: existingLlmProvider?.is_auto_mode ?? true,
};
const validationSchema = isOnboarding
? Yup.object().shape({
api_key: Yup.string().required("API Key is required"),
default_model_name: Yup.string().required("Model name is required"),
})
: buildDefaultValidationSchema().shape({
api_key: Yup.string().required("API Key is required"),
});
return (
<ModalWrapper
providerName={LLMProviderName.ANTHROPIC}
llmProvider={existingLlmProvider}
onClose={onClose}
<Formik
initialValues={initialValues}
validationSchema={validationSchema}
onSubmit={async (values, { setSubmitting, setStatus }) => {
await submitProvider({
analyticsSource: isOnboarding
? LLMProviderConfiguredSource.CHAT_ONBOARDING
: LLMProviderConfiguredSource.ADMIN_PAGE,
providerName: LLMProviderName.ANTHROPIC,
values,
initialValues,
existingLlmProvider,
shouldMarkAsDefault,
setStatus,
setSubmitting,
onClose,
onSuccess: async () => {
if (onSuccess) {
await onSuccess();
} else {
await refreshLlmProviderCaches(mutate);
toast.success(
existingLlmProvider
? "Provider updated successfully!"
: "Provider enabled successfully!"
);
}
},
});
validateOnMount={true}
onSubmit={async (values, { setSubmitting }) => {
if (isOnboarding && onboardingState && onboardingActions) {
const modelConfigsToUse =
(wellKnownLLMProvider ?? llmDescriptor)?.known_models ?? [];
await submitOnboardingProvider({
providerName: ANTHROPIC_PROVIDER_NAME,
payload: {
...values,
model_configurations: modelConfigsToUse,
is_auto_mode:
values.default_model_name === DEFAULT_DEFAULT_MODEL_NAME,
},
onboardingState,
onboardingActions,
isCustomProvider: false,
onClose,
setIsSubmitting: setSubmitting,
});
} else {
await submitLLMProvider({
providerName: ANTHROPIC_PROVIDER_NAME,
values,
initialValues,
modelConfigurations,
existingLlmProvider,
shouldMarkAsDefault,
setIsTesting,
mutate,
onClose,
setSubmitting,
});
}
}}
>
<APIKeyField providerName="Anthropic" />
{(formikProps) => (
<LLMConfigurationModalWrapper
providerEndpoint={ANTHROPIC_PROVIDER_NAME}
existingProviderName={existingLlmProvider?.name}
onClose={onClose}
isFormValid={formikProps.isValid}
isDirty={formikProps.dirty}
isTesting={isTesting}
isSubmitting={formikProps.isSubmitting}
>
<APIKeyField providerName="Anthropic" />
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
{!isOnboarding && (
<>
<FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
)}
<FieldSeparator />
{isOnboarding ? (
<SingleDefaultModelField placeholder="E.g. claude-sonnet-4-5" />
) : (
<ModelsField
modelConfigurations={modelConfigurations}
formikProps={formikProps}
recommendedDefaultModel={
wellKnownLLMProvider?.recommended_default_model ?? null
}
shouldShowAutoUpdateToggle={true}
/>
)}
{!isOnboarding && (
<>
<FieldSeparator />
<ModelsAccessField formikProps={formikProps} />
</>
)}
</LLMConfigurationModalWrapper>
)}
<InputLayouts.FieldSeparator />
<ModelSelectionField shouldShowAutoUpdateToggle={true} />
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<ModelAccessField />
</>
)}
</ModalWrapper>
</Formik>
);
}

View File

@@ -1,35 +1,45 @@
"use client";
import { useState } from "react";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
import { Formik } from "formik";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
import * as InputLayouts from "@/layouts/input-layouts";
import {
LLMProviderFormProps,
LLMProviderName,
LLMProviderView,
ModelConfiguration,
} from "@/interfaces/llm";
import * as Yup from "yup";
import { useWellKnownLLMProvider } from "@/hooks/useLLMProviders";
import {
useInitialValues,
buildValidationSchema,
buildDefaultInitialValues,
buildDefaultValidationSchema,
buildAvailableModelConfigurations,
buildOnboardingInitialValues,
BaseLLMFormValues,
} from "@/sections/modals/llmConfig/utils";
import { submitProvider } from "@/sections/modals/llmConfig/svc";
import { LLMProviderConfiguredSource } from "@/lib/analytics";
import {
submitLLMProvider,
submitOnboardingProvider,
} from "@/sections/modals/llmConfig/svc";
import {
APIKeyField,
DisplayNameField,
ModelAccessField,
ModelSelectionField,
ModalWrapper,
FieldSeparator,
FieldWrapper,
ModelsAccessField,
ModelsField,
SingleDefaultModelField,
LLMConfigurationModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import {
isValidAzureTargetUri,
parseAzureTargetUri,
} from "@/lib/azureTargetUri";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
const AZURE_PROVIDER_NAME = "azure";
interface AzureModalValues extends BaseLLMFormValues {
api_key: string;
@@ -39,33 +49,6 @@ interface AzureModalValues extends BaseLLMFormValues {
deployment_name?: string;
}
function AzureModelSelection() {
const formikProps = useFormikContext<AzureModalValues>();
return (
<ModelSelectionField
shouldShowAutoUpdateToggle={false}
onAddModel={(modelName) => {
const current = formikProps.values.model_configurations;
if (current.some((m) => m.name === modelName)) return;
const updated = [
...current,
{
name: modelName,
is_visible: true,
max_input_tokens: null,
supports_image_input: false,
supports_reasoning: false,
},
];
formikProps.setFieldValue("model_configurations", updated);
if (!formikProps.values.test_model_name) {
formikProps.setFieldValue("test_model_name", modelName);
}
}}
/>
);
}
function buildTargetUri(existingLlmProvider?: LLMProviderView): string {
if (!existingLlmProvider?.api_base || !existingLlmProvider?.api_version) {
return "";
@@ -101,103 +84,189 @@ export default function AzureModal({
existingLlmProvider,
shouldMarkAsDefault,
onOpenChange,
onSuccess,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
}: LLMProviderFormProps) {
const isOnboarding = variant === "onboarding";
const [isTesting, setIsTesting] = useState(false);
const { mutate } = useSWRConfig();
const { wellKnownLLMProvider } = useWellKnownLLMProvider(AZURE_PROVIDER_NAME);
const onClose = () => onOpenChange?.(false);
const [addedModels, setAddedModels] = useState<ModelConfiguration[]>([]);
const initialValues: AzureModalValues = {
...useInitialValues(
isOnboarding,
LLMProviderName.AZURE,
existingLlmProvider
),
target_uri: buildTargetUri(existingLlmProvider),
} as AzureModalValues;
const onClose = () => {
setAddedModels([]);
onOpenChange?.(false);
};
const validationSchema = buildValidationSchema(isOnboarding, {
apiKey: true,
extra: {
target_uri: Yup.string()
.required("Target URI is required")
.test(
"valid-target-uri",
"Target URI must be a valid URL with api-version query parameter and either a deployment name in the path or /openai/responses",
(value) => (value ? isValidAzureTargetUri(value) : false)
const baseModelConfigurations = buildAvailableModelConfigurations(
existingLlmProvider,
wellKnownLLMProvider ?? llmDescriptor
);
// Merge base models with any user-added models (dedup by name)
const existingNames = new Set(baseModelConfigurations.map((m) => m.name));
const modelConfigurations = [
...baseModelConfigurations,
...addedModels.filter((m) => !existingNames.has(m.name)),
];
const initialValues: AzureModalValues = isOnboarding
? ({
...buildOnboardingInitialValues(),
name: AZURE_PROVIDER_NAME,
provider: AZURE_PROVIDER_NAME,
api_key: "",
target_uri: "",
default_model_name: "",
} as AzureModalValues)
: {
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
},
});
api_key: existingLlmProvider?.api_key ?? "",
target_uri: buildTargetUri(existingLlmProvider),
};
const validationSchema = isOnboarding
? Yup.object().shape({
api_key: Yup.string().required("API Key is required"),
target_uri: Yup.string()
.required("Target URI is required")
.test(
"valid-target-uri",
"Target URI must be a valid URL with api-version query parameter and either a deployment name in the path or /openai/responses",
(value) => (value ? isValidAzureTargetUri(value) : false)
),
default_model_name: Yup.string().required("Model name is required"),
})
: buildDefaultValidationSchema().shape({
api_key: Yup.string().required("API Key is required"),
target_uri: Yup.string()
.required("Target URI is required")
.test(
"valid-target-uri",
"Target URI must be a valid URL with api-version query parameter and either a deployment name in the path or /openai/responses",
(value) => (value ? isValidAzureTargetUri(value) : false)
),
});
return (
<ModalWrapper
providerName={LLMProviderName.AZURE}
llmProvider={existingLlmProvider}
onClose={onClose}
<Formik
initialValues={initialValues}
validationSchema={validationSchema}
onSubmit={async (values, { setSubmitting, setStatus }) => {
validateOnMount={true}
onSubmit={async (values, { setSubmitting }) => {
const processedValues = processValues(values);
await submitProvider({
analyticsSource: isOnboarding
? LLMProviderConfiguredSource.CHAT_ONBOARDING
: LLMProviderConfiguredSource.ADMIN_PAGE,
providerName: LLMProviderName.AZURE,
values: processedValues,
initialValues,
existingLlmProvider,
shouldMarkAsDefault,
setStatus,
setSubmitting,
onClose,
onSuccess: async () => {
if (onSuccess) {
await onSuccess();
} else {
await refreshLlmProviderCaches(mutate);
toast.success(
existingLlmProvider
? "Provider updated successfully!"
: "Provider enabled successfully!"
);
}
},
});
if (isOnboarding && onboardingState && onboardingActions) {
const modelConfigsToUse =
(wellKnownLLMProvider ?? llmDescriptor)?.known_models ?? [];
await submitOnboardingProvider({
providerName: AZURE_PROVIDER_NAME,
payload: {
...processedValues,
model_configurations: modelConfigsToUse,
},
onboardingState,
onboardingActions,
isCustomProvider: false,
onClose,
setIsSubmitting: setSubmitting,
});
} else {
await submitLLMProvider({
providerName: AZURE_PROVIDER_NAME,
values: processedValues,
initialValues,
modelConfigurations,
existingLlmProvider,
shouldMarkAsDefault,
setIsTesting,
mutate,
onClose,
setSubmitting,
});
}
}}
>
<InputLayouts.FieldPadder>
<InputLayouts.Vertical
name="target_uri"
title="Target URI"
subDescription="Paste your endpoint target URI from Azure OpenAI (including API endpoint base, deployment name, and API version)."
{(formikProps) => (
<LLMConfigurationModalWrapper
providerEndpoint={AZURE_PROVIDER_NAME}
existingProviderName={existingLlmProvider?.name}
onClose={onClose}
isFormValid={formikProps.isValid}
isDirty={formikProps.dirty}
isTesting={isTesting}
isSubmitting={formikProps.isSubmitting}
>
<InputTypeInField
name="target_uri"
placeholder="https://your-resource.cognitiveservices.azure.com/openai/deployments/deployment-name/chat/completions?api-version=2025-01-01-preview"
/>
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
<FieldWrapper>
<InputLayouts.Vertical
name="target_uri"
title="Target URI"
subDescription="Paste your endpoint target URI from Azure OpenAI (including API endpoint base, deployment name, and API version)."
>
<InputTypeInField
name="target_uri"
placeholder="https://your-resource.cognitiveservices.azure.com/openai/deployments/deployment-name/chat/completions?api-version=2025-01-01-preview"
/>
</InputLayouts.Vertical>
</FieldWrapper>
<APIKeyField providerName="Azure" />
<APIKeyField providerName="Azure" />
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
{!isOnboarding && (
<>
<FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
)}
<FieldSeparator />
{isOnboarding ? (
<SingleDefaultModelField placeholder="E.g. gpt-4o" />
) : (
<ModelsField
modelConfigurations={modelConfigurations}
formikProps={formikProps}
recommendedDefaultModel={null}
shouldShowAutoUpdateToggle={false}
onAddModel={(modelName) => {
const newModel: ModelConfiguration = {
name: modelName,
is_visible: true,
max_input_tokens: null,
supports_image_input: false,
supports_reasoning: false,
};
setAddedModels((prev) => [...prev, newModel]);
const currentSelected =
formikProps.values.selected_model_names ?? [];
formikProps.setFieldValue("selected_model_names", [
...currentSelected,
modelName,
]);
if (!formikProps.values.default_model_name) {
formikProps.setFieldValue("default_model_name", modelName);
}
}}
/>
)}
{!isOnboarding && (
<>
<FieldSeparator />
<ModelsAccessField formikProps={formikProps} />
</>
)}
</LLMConfigurationModalWrapper>
)}
<InputLayouts.FieldSeparator />
<AzureModelSelection />
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<ModelAccessField />
</>
)}
</ModalWrapper>
</Formik>
);
}

View File

@@ -1,8 +1,8 @@
"use client";
import { useEffect } from "react";
import { useState, useEffect } from "react";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
import { Formik, FormikProps } from "formik";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
import InputSelectField from "@/refresh-components/form/InputSelectField";
import InputSelect from "@/refresh-components/inputs/InputSelect";
@@ -10,31 +10,40 @@ import * as InputLayouts from "@/layouts/input-layouts";
import PasswordInputTypeInField from "@/refresh-components/form/PasswordInputTypeInField";
import {
LLMProviderFormProps,
LLMProviderName,
LLMProviderView,
ModelConfiguration,
} from "@/interfaces/llm";
import * as Yup from "yup";
import { useWellKnownLLMProvider } from "@/hooks/useLLMProviders";
import {
useInitialValues,
buildValidationSchema,
buildDefaultInitialValues,
buildDefaultValidationSchema,
buildAvailableModelConfigurations,
buildOnboardingInitialValues,
BaseLLMFormValues,
} from "@/sections/modals/llmConfig/utils";
import { submitProvider } from "@/sections/modals/llmConfig/svc";
import { LLMProviderConfiguredSource } from "@/lib/analytics";
import {
ModelSelectionField,
submitLLMProvider,
submitOnboardingProvider,
} from "@/sections/modals/llmConfig/svc";
import {
ModelsField,
DisplayNameField,
ModelAccessField,
ModalWrapper,
FieldSeparator,
FieldWrapper,
ModelsAccessField,
SingleDefaultModelField,
LLMConfigurationModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { fetchBedrockModels } from "@/lib/llmConfig/svc";
import { fetchBedrockModels } from "@/app/admin/configuration/llm/utils";
import { Card } from "@opal/components";
import { Section } from "@/layouts/general-layouts";
import { SvgAlertCircle } from "@opal/icons";
import { Content } from "@opal/layouts";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import useOnMount from "@/hooks/useOnMount";
const BEDROCK_PROVIDER_NAME = "bedrock";
const AWS_REGION_OPTIONS = [
{ name: "us-east-1", value: "us-east-1" },
{ name: "us-east-2", value: "us-east-2" },
@@ -70,15 +79,26 @@ interface BedrockModalValues extends BaseLLMFormValues {
}
interface BedrockModalInternalsProps {
formikProps: FormikProps<BedrockModalValues>;
existingLlmProvider: LLMProviderView | undefined;
fetchedModels: ModelConfiguration[];
setFetchedModels: (models: ModelConfiguration[]) => void;
modelConfigurations: ModelConfiguration[];
isTesting: boolean;
onClose: () => void;
isOnboarding: boolean;
}
function BedrockModalInternals({
formikProps,
existingLlmProvider,
fetchedModels,
setFetchedModels,
modelConfigurations,
isTesting,
onClose,
isOnboarding,
}: BedrockModalInternalsProps) {
const formikProps = useFormikContext<BedrockModalValues>();
const authMethod = formikProps.values.custom_config?.BEDROCK_AUTH_METHOD;
useEffect(() => {
@@ -95,6 +115,11 @@ function BedrockModalInternals({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [authMethod]);
const currentModels =
fetchedModels.length > 0
? fetchedModels
: existingLlmProvider?.model_configurations || modelConfigurations;
const isAuthComplete =
authMethod === AUTH_METHOD_IAM ||
(authMethod === AUTH_METHOD_ACCESS_KEY &&
@@ -114,17 +139,36 @@ function BedrockModalInternals({
formikProps.values.custom_config?.AWS_SECRET_ACCESS_KEY,
aws_bearer_token_bedrock:
formikProps.values.custom_config?.AWS_BEARER_TOKEN_BEDROCK,
provider_name: LLMProviderName.BEDROCK,
provider_name: existingLlmProvider?.name,
});
if (error) {
throw new Error(error);
}
formikProps.setFieldValue("model_configurations", models);
setFetchedModels(models);
};
// Auto-fetch models on initial load when editing an existing provider
useOnMount(() => {
if (existingLlmProvider && !isFetchDisabled) {
handleFetchModels().catch((err) => {
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
});
}
});
return (
<>
<InputLayouts.FieldPadder>
<LLMConfigurationModalWrapper
providerEndpoint={BEDROCK_PROVIDER_NAME}
existingProviderName={existingLlmProvider?.name}
onClose={onClose}
isFormValid={formikProps.isValid}
isDirty={formikProps.dirty}
isTesting={isTesting}
isSubmitting={formikProps.isSubmitting}
>
<FieldWrapper>
<Section gap={1}>
<InputLayouts.Vertical
name={FIELD_AWS_REGION_NAME}
@@ -178,7 +222,7 @@ function BedrockModalInternals({
</InputSelect>
</InputLayouts.Vertical>
</Section>
</InputLayouts.FieldPadder>
</FieldWrapper>
{authMethod === AUTH_METHOD_ACCESS_KEY && (
<Card background="light" border="none" padding="sm">
@@ -206,7 +250,7 @@ function BedrockModalInternals({
)}
{authMethod === AUTH_METHOD_IAM && (
<InputLayouts.FieldPadder>
<FieldWrapper>
<Card background="none" border="solid" padding="sm">
<Content
icon={SvgAlertCircle}
@@ -215,7 +259,7 @@ function BedrockModalInternals({
sizePreset="main-ui"
/>
</Card>
</InputLayouts.FieldPadder>
</FieldWrapper>
)}
{authMethod === AUTH_METHOD_LONG_TERM_API_KEY && (
@@ -236,24 +280,32 @@ function BedrockModalInternals({
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
)}
<InputLayouts.FieldSeparator />
<ModelSelectionField
shouldShowAutoUpdateToggle={false}
onRefetch={isFetchDisabled ? undefined : handleFetchModels}
/>
<FieldSeparator />
{isOnboarding ? (
<SingleDefaultModelField placeholder="E.g. us.anthropic.claude-sonnet-4-5-v1" />
) : (
<ModelsField
modelConfigurations={currentModels}
formikProps={formikProps}
recommendedDefaultModel={null}
shouldShowAutoUpdateToggle={false}
onRefetch={isFetchDisabled ? undefined : handleFetchModels}
/>
)}
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<ModelAccessField />
<FieldSeparator />
<ModelsAccessField formikProps={formikProps} />
</>
)}
</>
</LLMConfigurationModalWrapper>
);
}
@@ -262,52 +314,84 @@ export default function BedrockModal({
existingLlmProvider,
shouldMarkAsDefault,
onOpenChange,
onSuccess,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
}: LLMProviderFormProps) {
const [fetchedModels, setFetchedModels] = useState<ModelConfiguration[]>([]);
const [isTesting, setIsTesting] = useState(false);
const isOnboarding = variant === "onboarding";
const { mutate } = useSWRConfig();
const { wellKnownLLMProvider } = useWellKnownLLMProvider(
BEDROCK_PROVIDER_NAME
);
const onClose = () => onOpenChange?.(false);
const initialValues: BedrockModalValues = {
...useInitialValues(
isOnboarding,
LLMProviderName.BEDROCK,
existingLlmProvider
),
custom_config: {
AWS_REGION_NAME:
(existingLlmProvider?.custom_config?.AWS_REGION_NAME as string) ?? "",
BEDROCK_AUTH_METHOD:
(existingLlmProvider?.custom_config?.BEDROCK_AUTH_METHOD as string) ??
"access_key",
AWS_ACCESS_KEY_ID:
(existingLlmProvider?.custom_config?.AWS_ACCESS_KEY_ID as string) ?? "",
AWS_SECRET_ACCESS_KEY:
(existingLlmProvider?.custom_config?.AWS_SECRET_ACCESS_KEY as string) ??
"",
AWS_BEARER_TOKEN_BEDROCK:
(existingLlmProvider?.custom_config
?.AWS_BEARER_TOKEN_BEDROCK as string) ?? "",
},
} as BedrockModalValues;
const modelConfigurations = buildAvailableModelConfigurations(
existingLlmProvider,
wellKnownLLMProvider ?? llmDescriptor
);
const validationSchema = buildValidationSchema(isOnboarding, {
extra: {
custom_config: Yup.object({
AWS_REGION_NAME: Yup.string().required("AWS Region is required"),
}),
},
});
const initialValues: BedrockModalValues = isOnboarding
? ({
...buildOnboardingInitialValues(),
name: BEDROCK_PROVIDER_NAME,
provider: BEDROCK_PROVIDER_NAME,
default_model_name: "",
custom_config: {
AWS_REGION_NAME: "",
BEDROCK_AUTH_METHOD: "access_key",
AWS_ACCESS_KEY_ID: "",
AWS_SECRET_ACCESS_KEY: "",
AWS_BEARER_TOKEN_BEDROCK: "",
},
} as BedrockModalValues)
: {
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
custom_config: {
AWS_REGION_NAME:
(existingLlmProvider?.custom_config?.AWS_REGION_NAME as string) ??
"",
BEDROCK_AUTH_METHOD:
(existingLlmProvider?.custom_config
?.BEDROCK_AUTH_METHOD as string) ?? "access_key",
AWS_ACCESS_KEY_ID:
(existingLlmProvider?.custom_config?.AWS_ACCESS_KEY_ID as string) ??
"",
AWS_SECRET_ACCESS_KEY:
(existingLlmProvider?.custom_config
?.AWS_SECRET_ACCESS_KEY as string) ?? "",
AWS_BEARER_TOKEN_BEDROCK:
(existingLlmProvider?.custom_config
?.AWS_BEARER_TOKEN_BEDROCK as string) ?? "",
},
};
const validationSchema = isOnboarding
? Yup.object().shape({
default_model_name: Yup.string().required("Model name is required"),
custom_config: Yup.object({
AWS_REGION_NAME: Yup.string().required("AWS Region is required"),
}),
})
: buildDefaultValidationSchema().shape({
custom_config: Yup.object({
AWS_REGION_NAME: Yup.string().required("AWS Region is required"),
}),
});
return (
<ModalWrapper
providerName={LLMProviderName.BEDROCK}
llmProvider={existingLlmProvider}
onClose={onClose}
<Formik
initialValues={initialValues}
validationSchema={validationSchema}
onSubmit={async (values, { setSubmitting, setStatus }) => {
validateOnMount={true}
onSubmit={async (values, { setSubmitting }) => {
const filteredCustomConfig = Object.fromEntries(
Object.entries(values.custom_config || {}).filter(([, v]) => v !== "")
);
@@ -320,37 +404,51 @@ export default function BedrockModal({
: undefined,
};
await submitProvider({
analyticsSource: isOnboarding
? LLMProviderConfiguredSource.CHAT_ONBOARDING
: LLMProviderConfiguredSource.ADMIN_PAGE,
providerName: LLMProviderName.BEDROCK,
values: submitValues,
initialValues,
existingLlmProvider,
shouldMarkAsDefault,
setStatus,
setSubmitting,
onClose,
onSuccess: async () => {
if (onSuccess) {
await onSuccess();
} else {
await refreshLlmProviderCaches(mutate);
toast.success(
existingLlmProvider
? "Provider updated successfully!"
: "Provider enabled successfully!"
);
}
},
});
if (isOnboarding && onboardingState && onboardingActions) {
const modelConfigsToUse =
fetchedModels.length > 0 ? fetchedModels : [];
await submitOnboardingProvider({
providerName: BEDROCK_PROVIDER_NAME,
payload: {
...submitValues,
model_configurations: modelConfigsToUse,
},
onboardingState,
onboardingActions,
isCustomProvider: false,
onClose,
setIsSubmitting: setSubmitting,
});
} else {
await submitLLMProvider({
providerName: BEDROCK_PROVIDER_NAME,
values: submitValues,
initialValues,
modelConfigurations:
fetchedModels.length > 0 ? fetchedModels : modelConfigurations,
existingLlmProvider,
shouldMarkAsDefault,
setIsTesting,
mutate,
onClose,
setSubmitting,
});
}
}}
>
<BedrockModalInternals
existingLlmProvider={existingLlmProvider}
isOnboarding={isOnboarding}
/>
</ModalWrapper>
{(formikProps) => (
<BedrockModalInternals
formikProps={formikProps}
existingLlmProvider={existingLlmProvider}
fetchedModels={fetchedModels}
setFetchedModels={setFetchedModels}
modelConfigurations={modelConfigurations}
isTesting={isTesting}
onClose={onClose}
isOnboarding={isOnboarding}
/>
)}
</Formik>
);
}

View File

@@ -1,32 +1,45 @@
"use client";
import { useState, useEffect } from "react";
import { markdown } from "@opal/utils";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
import { Formik, FormikProps } from "formik";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
import PasswordInputTypeInField from "@/refresh-components/form/PasswordInputTypeInField";
import * as InputLayouts from "@/layouts/input-layouts";
import {
LLMProviderFormProps,
LLMProviderName,
LLMProviderView,
ModelConfiguration,
} from "@/interfaces/llm";
import { fetchBifrostModels } from "@/lib/llmConfig/svc";
import { fetchBifrostModels } from "@/app/admin/configuration/llm/utils";
import * as Yup from "yup";
import { useWellKnownLLMProvider } from "@/hooks/useLLMProviders";
import {
useInitialValues,
buildValidationSchema,
buildDefaultInitialValues,
buildDefaultValidationSchema,
buildAvailableModelConfigurations,
buildOnboardingInitialValues,
BaseLLMFormValues,
} from "@/sections/modals/llmConfig/utils";
import { submitProvider } from "@/sections/modals/llmConfig/svc";
import { LLMProviderConfiguredSource } from "@/lib/analytics";
import {
APIBaseField,
APIKeyField,
ModelSelectionField,
submitLLMProvider,
submitOnboardingProvider,
} from "@/sections/modals/llmConfig/svc";
import {
ModelsField,
DisplayNameField,
ModelAccessField,
ModalWrapper,
ModelsAccessField,
FieldSeparator,
FieldWrapper,
SingleDefaultModelField,
LLMConfigurationModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
const BIFROST_PROVIDER_NAME = LLMProviderName.BIFROST;
const DEFAULT_API_BASE = "";
interface BifrostModalValues extends BaseLLMFormValues {
api_key: string;
@@ -34,15 +47,30 @@ interface BifrostModalValues extends BaseLLMFormValues {
}
interface BifrostModalInternalsProps {
formikProps: FormikProps<BifrostModalValues>;
existingLlmProvider: LLMProviderView | undefined;
fetchedModels: ModelConfiguration[];
setFetchedModels: (models: ModelConfiguration[]) => void;
modelConfigurations: ModelConfiguration[];
isTesting: boolean;
onClose: () => void;
isOnboarding: boolean;
}
function BifrostModalInternals({
formikProps,
existingLlmProvider,
fetchedModels,
setFetchedModels,
modelConfigurations,
isTesting,
onClose,
isOnboarding,
}: BifrostModalInternalsProps) {
const formikProps = useFormikContext<BifrostModalValues>();
const currentModels =
fetchedModels.length > 0
? fetchedModels
: existingLlmProvider?.model_configurations || modelConfigurations;
const isFetchDisabled = !formikProps.values.api_base;
@@ -50,48 +78,91 @@ function BifrostModalInternals({
const { models, error } = await fetchBifrostModels({
api_base: formikProps.values.api_base,
api_key: formikProps.values.api_key || undefined,
provider_name: LLMProviderName.BIFROST,
provider_name: existingLlmProvider?.name,
});
if (error) {
throw new Error(error);
}
formikProps.setFieldValue("model_configurations", models);
setFetchedModels(models);
};
return (
<>
<APIBaseField
subDescription="Paste your Bifrost gateway endpoint URL (including API version)."
placeholder="https://your-bifrost-gateway.com/v1"
/>
// Auto-fetch models on initial load when editing an existing provider
useEffect(() => {
if (existingLlmProvider && !isFetchDisabled) {
handleFetchModels().catch((err) => {
console.error("Failed to fetch Bifrost models:", err);
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
<APIKeyField
optional
subDescription={markdown(
"Paste your API key from [Bifrost](https://docs.getbifrost.ai/overview) to access your models."
)}
/>
return (
<LLMConfigurationModalWrapper
providerEndpoint={LLMProviderName.BIFROST}
existingProviderName={existingLlmProvider?.name}
onClose={onClose}
isFormValid={formikProps.isValid}
isDirty={formikProps.dirty}
isTesting={isTesting}
isSubmitting={formikProps.isSubmitting}
>
<FieldWrapper>
<InputLayouts.Vertical
name="api_base"
title="API Base URL"
subDescription="Paste your Bifrost gateway endpoint URL (including API version)."
>
<InputTypeInField
name="api_base"
placeholder="https://your-bifrost-gateway.com/v1"
/>
</InputLayouts.Vertical>
</FieldWrapper>
<FieldWrapper>
<InputLayouts.Vertical
name="api_key"
title="API Key"
suffix="optional"
subDescription={markdown(
"Paste your API key from [Bifrost](https://docs.getbifrost.ai/overview) to access your models."
)}
>
<PasswordInputTypeInField name="api_key" placeholder="API Key" />
</InputLayouts.Vertical>
</FieldWrapper>
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
)}
<InputLayouts.FieldSeparator />
<ModelSelectionField
shouldShowAutoUpdateToggle={false}
onRefetch={isFetchDisabled ? undefined : handleFetchModels}
/>
<FieldSeparator />
{isOnboarding ? (
<SingleDefaultModelField placeholder="E.g. anthropic/claude-sonnet-4-6" />
) : (
<ModelsField
modelConfigurations={currentModels}
formikProps={formikProps}
recommendedDefaultModel={null}
shouldShowAutoUpdateToggle={false}
onRefetch={isFetchDisabled ? undefined : handleFetchModels}
/>
)}
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<ModelAccessField />
<FieldSeparator />
<ModelsAccessField formikProps={formikProps} />
</>
)}
</>
</LLMConfigurationModalWrapper>
);
}
@@ -100,62 +171,105 @@ export default function BifrostModal({
existingLlmProvider,
shouldMarkAsDefault,
onOpenChange,
onSuccess,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
}: LLMProviderFormProps) {
const [fetchedModels, setFetchedModels] = useState<ModelConfiguration[]>([]);
const [isTesting, setIsTesting] = useState(false);
const isOnboarding = variant === "onboarding";
const { mutate } = useSWRConfig();
const { wellKnownLLMProvider } = useWellKnownLLMProvider(
BIFROST_PROVIDER_NAME
);
const onClose = () => onOpenChange?.(false);
const initialValues: BifrostModalValues = useInitialValues(
isOnboarding,
LLMProviderName.BIFROST,
existingLlmProvider
) as BifrostModalValues;
const modelConfigurations = buildAvailableModelConfigurations(
existingLlmProvider,
wellKnownLLMProvider ?? llmDescriptor
);
const validationSchema = buildValidationSchema(isOnboarding, {
apiBase: true,
});
const initialValues: BifrostModalValues = isOnboarding
? ({
...buildOnboardingInitialValues(),
name: BIFROST_PROVIDER_NAME,
provider: BIFROST_PROVIDER_NAME,
api_key: "",
api_base: DEFAULT_API_BASE,
default_model_name: "",
} as BifrostModalValues)
: {
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_key: existingLlmProvider?.api_key ?? "",
api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
};
const validationSchema = isOnboarding
? Yup.object().shape({
api_base: Yup.string().required("API Base URL is required"),
default_model_name: Yup.string().required("Model name is required"),
})
: buildDefaultValidationSchema().shape({
api_base: Yup.string().required("API Base URL is required"),
});
return (
<ModalWrapper
providerName={LLMProviderName.BIFROST}
llmProvider={existingLlmProvider}
onClose={onClose}
<Formik
initialValues={initialValues}
validationSchema={validationSchema}
onSubmit={async (values, { setSubmitting, setStatus }) => {
await submitProvider({
analyticsSource: isOnboarding
? LLMProviderConfiguredSource.CHAT_ONBOARDING
: LLMProviderConfiguredSource.ADMIN_PAGE,
providerName: LLMProviderName.BIFROST,
values,
initialValues,
existingLlmProvider,
shouldMarkAsDefault,
setStatus,
setSubmitting,
onClose,
onSuccess: async () => {
if (onSuccess) {
await onSuccess();
} else {
await refreshLlmProviderCaches(mutate);
toast.success(
existingLlmProvider
? "Provider updated successfully!"
: "Provider enabled successfully!"
);
}
},
});
validateOnMount={true}
onSubmit={async (values, { setSubmitting }) => {
if (isOnboarding && onboardingState && onboardingActions) {
const modelConfigsToUse =
fetchedModels.length > 0 ? fetchedModels : [];
await submitOnboardingProvider({
providerName: BIFROST_PROVIDER_NAME,
payload: {
...values,
model_configurations: modelConfigsToUse,
},
onboardingState,
onboardingActions,
isCustomProvider: false,
onClose,
setIsSubmitting: setSubmitting,
});
} else {
await submitLLMProvider({
providerName: BIFROST_PROVIDER_NAME,
values,
initialValues,
modelConfigurations:
fetchedModels.length > 0 ? fetchedModels : modelConfigurations,
existingLlmProvider,
shouldMarkAsDefault,
setIsTesting,
mutate,
onClose,
setSubmitting,
});
}
}}
>
<BifrostModalInternals
existingLlmProvider={existingLlmProvider}
isOnboarding={isOnboarding}
/>
</ModalWrapper>
{(formikProps) => (
<BifrostModalInternals
formikProps={formikProps}
existingLlmProvider={existingLlmProvider}
fetchedModels={fetchedModels}
setFetchedModels={setFetchedModels}
modelConfigurations={modelConfigurations}
isTesting={isTesting}
onClose={onClose}
isOnboarding={isOnboarding}
/>
)}
</Formik>
);
}

View File

@@ -1,22 +1,25 @@
"use client";
import { useState } from "react";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
import {
LLMProviderFormProps,
LLMProviderName,
ModelConfiguration,
} from "@/interfaces/llm";
import { Formik, FormikProps } from "formik";
import { LLMProviderFormProps, ModelConfiguration } from "@/interfaces/llm";
import * as Yup from "yup";
import { useInitialValues } from "@/sections/modals/llmConfig/utils";
import { submitProvider } from "@/sections/modals/llmConfig/svc";
import { LLMProviderConfiguredSource } from "@/lib/analytics";
import {
buildDefaultInitialValues,
buildOnboardingInitialValues,
} from "@/sections/modals/llmConfig/utils";
import {
submitLLMProvider,
submitOnboardingProvider,
} from "@/sections/modals/llmConfig/svc";
import {
APIKeyField,
APIBaseField,
DisplayNameField,
ModelAccessField,
ModalWrapper,
FieldSeparator,
ModelsAccessField,
LLMConfigurationModalWrapper,
FieldWrapper,
} from "@/sections/modals/llmConfig/shared";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
import * as InputLayouts from "@/layouts/input-layouts";
@@ -30,7 +33,6 @@ import { Button, Card, EmptyMessageCard } from "@opal/components";
import { SvgMinusCircle, SvgPlusCircle } from "@opal/icons";
import { markdown } from "@opal/utils";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import { Content } from "@opal/layouts";
import { Section } from "@/layouts/general-layouts";
@@ -107,10 +109,13 @@ function ModelConfigurationItem({
);
}
function ModelConfigurationList() {
const formikProps = useFormikContext<{
interface ModelConfigurationListProps {
formikProps: FormikProps<{
model_configurations: CustomModelConfiguration[];
}>();
}>;
}
function ModelConfigurationList({ formikProps }: ModelConfigurationListProps) {
const models = formikProps.values.model_configurations;
function handleChange(index: number, next: CustomModelConfiguration) {
@@ -176,19 +181,6 @@ function ModelConfigurationList() {
);
}
// Key/value editor bound to the Formik `custom_config_list` field; edits from
// the input are written straight back into the form state.
function CustomConfigKeyValue() {
  const { values, setFieldValue } = useFormikContext<{
    custom_config_list: KeyValue[];
  }>();

  const handleChange = (nextItems: KeyValue[]) => {
    setFieldValue("custom_config_list", nextItems);
  };

  return (
    <KeyValueInput
      items={values.custom_config_list}
      onChange={handleChange}
      addButtonLabel="Add Line"
    />
  );
}
// ─── Custom Config Processing ─────────────────────────────────────────────────
function keyValueListToDict(items: KeyValue[]): Record<string, string> {
@@ -206,38 +198,40 @@ export default function CustomModal({
existingLlmProvider,
shouldMarkAsDefault,
onOpenChange,
onSuccess,
defaultModelName,
onboardingState,
onboardingActions,
}: LLMProviderFormProps) {
const isOnboarding = variant === "onboarding";
const [isTesting, setIsTesting] = useState(false);
const { mutate } = useSWRConfig();
const onClose = () => onOpenChange?.(false);
const initialValues = {
...useInitialValues(
isOnboarding,
LLMProviderName.CUSTOM,
existingLlmProvider
...buildDefaultInitialValues(
existingLlmProvider,
undefined,
defaultModelName
),
...(isOnboarding ? buildOnboardingInitialValues() : {}),
provider: existingLlmProvider?.provider ?? "",
api_key: existingLlmProvider?.api_key ?? "",
api_base: existingLlmProvider?.api_base ?? "",
api_version: existingLlmProvider?.api_version ?? "",
model_configurations: existingLlmProvider?.model_configurations.map(
(mc) => ({
name: mc.name,
display_name: mc.display_name ?? "",
is_visible: mc.is_visible,
max_input_tokens: mc.max_input_tokens ?? null,
supports_image_input: mc.supports_image_input,
supports_reasoning: mc.supports_reasoning,
})
) ?? [
{
name: "",
display_name: "",
is_visible: true,
max_input_tokens: null,
supports_image_input: false,
supports_reasoning: false,
},
],
custom_config_list: existingLlmProvider?.custom_config
@@ -269,13 +263,11 @@ export default function CustomModal({
});
return (
<ModalWrapper
providerName={LLMProviderName.CUSTOM}
llmProvider={existingLlmProvider}
onClose={onClose}
<Formik
initialValues={initialValues}
validationSchema={validationSchema}
onSubmit={async (values, { setSubmitting, setStatus }) => {
validateOnMount={true}
onSubmit={async (values, { setSubmitting }) => {
setSubmitting(true);
const modelConfigurations = values.model_configurations
@@ -300,120 +292,156 @@ export default function CustomModal({
// created via CustomModal.
const customConfig = keyValueListToDict(values.custom_config_list);
await submitProvider({
analyticsSource: isOnboarding
? LLMProviderConfiguredSource.CHAT_ONBOARDING
: LLMProviderConfiguredSource.ADMIN_PAGE,
providerName: (values as Record<string, unknown>).provider as string,
values: {
...values,
model_configurations: modelConfigurations,
custom_config: customConfig,
},
initialValues: {
...initialValues,
custom_config: keyValueListToDict(initialValues.custom_config_list),
},
existingLlmProvider,
shouldMarkAsDefault,
isCustomProvider: true,
setStatus,
setSubmitting,
onClose,
onSuccess: async () => {
if (onSuccess) {
await onSuccess();
} else {
await refreshLlmProviderCaches(mutate);
toast.success(
existingLlmProvider
? "Provider updated successfully!"
: "Provider enabled successfully!"
);
}
},
});
if (isOnboarding && onboardingState && onboardingActions) {
await submitOnboardingProvider({
providerName: values.provider,
payload: {
...values,
model_configurations: modelConfigurations,
custom_config: customConfig,
},
onboardingState,
onboardingActions,
isCustomProvider: true,
onClose,
setIsSubmitting: setSubmitting,
});
} else {
const selectedModelNames = modelConfigurations.map(
(config) => config.name
);
await submitLLMProvider({
providerName: values.provider,
values: {
...values,
selected_model_names: selectedModelNames,
custom_config: customConfig,
},
initialValues: {
...initialValues,
custom_config: keyValueListToDict(
initialValues.custom_config_list
),
},
modelConfigurations,
existingLlmProvider,
shouldMarkAsDefault,
setIsTesting,
mutate,
onClose,
setSubmitting,
});
}
}}
>
<InputLayouts.FieldPadder>
<InputLayouts.Vertical
name="provider"
title="Provider Name"
subDescription={markdown(
"Should be one of the providers listed at [LiteLLM](https://docs.litellm.ai/docs/providers)."
{(formikProps) => (
<LLMConfigurationModalWrapper
providerEndpoint="custom"
existingProviderName={existingLlmProvider?.name}
onClose={onClose}
isFormValid={formikProps.isValid}
isDirty={formikProps.dirty}
isTesting={isTesting}
isSubmitting={formikProps.isSubmitting}
>
{!isOnboarding && (
<FieldWrapper>
<InputLayouts.Vertical
name="provider"
title="Provider Name"
subDescription={markdown(
"Should be one of the providers listed at [LiteLLM](https://docs.litellm.ai/docs/providers)."
)}
>
<InputTypeInField
name="provider"
placeholder="Provider Name as shown on LiteLLM"
variant={existingLlmProvider ? "disabled" : undefined}
/>
</InputLayouts.Vertical>
</FieldWrapper>
)}
>
<InputTypeInField
name="provider"
placeholder="Provider Name as shown on LiteLLM"
variant={existingLlmProvider ? "disabled" : undefined}
/>
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
<APIBaseField optional />
<FieldWrapper>
<InputLayouts.Vertical
name="api_base"
title="API Base URL"
suffix="optional"
>
<InputTypeInField name="api_base" placeholder="https://" />
</InputLayouts.Vertical>
</FieldWrapper>
<InputLayouts.FieldPadder>
<InputLayouts.Vertical
name="api_version"
title="API Version"
suffix="optional"
>
<InputTypeInField name="api_version" />
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
<FieldWrapper>
<InputLayouts.Vertical
name="api_version"
title="API Version"
suffix="optional"
>
<InputTypeInField name="api_version" />
</InputLayouts.Vertical>
</FieldWrapper>
<APIKeyField
optional
subDescription="Paste your API key if your model provider requires authentication."
/>
<InputLayouts.FieldPadder>
<Section gap={0.75}>
<Content
title="Additional Configs"
description={markdown(
"Add extra properties as needed by the model provider. These are passed to LiteLLM's `completion()` call as [environment variables](https://docs.litellm.ai/docs/set_keys#environment-variables). See [documentation](https://docs.onyx.app/admins/ai_models/custom_inference_provider) for more instructions."
)}
widthVariant="full"
variant="section"
sizePreset="main-content"
<APIKeyField
optional
subDescription="Paste your API key if your model provider requires authentication."
/>
<CustomConfigKeyValue />
</Section>
</InputLayouts.FieldPadder>
<FieldWrapper>
<Section gap={0.75}>
<Content
title="Additional Configs"
description={markdown(
"Add extra properties as needed by the model provider. These are passed to LiteLLM's `completion()` call as [environment variables](https://docs.litellm.ai/docs/set_keys#environment-variables). See [documentation](https://docs.onyx.app/admins/ai_models/custom_inference_provider) for more instructions."
)}
widthVariant="full"
variant="section"
sizePreset="main-content"
/>
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
<KeyValueInput
items={formikProps.values.custom_config_list}
onChange={(items) =>
formikProps.setFieldValue("custom_config_list", items)
}
addButtonLabel="Add Line"
/>
</Section>
</FieldWrapper>
<FieldSeparator />
{!isOnboarding && (
<DisplayNameField disabled={!!existingLlmProvider} />
)}
<FieldSeparator />
<Section gap={0.5}>
<FieldWrapper>
<Content
title="Models"
description="List LLM models you wish to use and their configurations for this provider. See full list of models at LiteLLM."
variant="section"
sizePreset="main-content"
widthVariant="full"
/>
</FieldWrapper>
<Card padding="sm">
<ModelConfigurationList formikProps={formikProps as any} />
</Card>
</Section>
{!isOnboarding && (
<>
<FieldSeparator />
<ModelsAccessField formikProps={formikProps} />
</>
)}
</LLMConfigurationModalWrapper>
)}
<InputLayouts.FieldSeparator />
<Section gap={0.5}>
<InputLayouts.FieldPadder>
<Content
title="Models"
description="List LLM models you wish to use and their configurations for this provider. See full list of models at LiteLLM."
variant="section"
sizePreset="main-content"
widthVariant="full"
/>
</InputLayouts.FieldPadder>
<Card padding="sm">
<ModelConfigurationList />
</Card>
</Section>
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<ModelAccessField />
</>
)}
</ModalWrapper>
</Formik>
);
}

View File

@@ -0,0 +1,312 @@
"use client";
import { useCallback, useEffect, useMemo, useState } from "react";
import { useSWRConfig } from "swr";
import { Formik, FormikProps } from "formik";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
import * as InputLayouts from "@/layouts/input-layouts";
import PasswordInputTypeInField from "@/refresh-components/form/PasswordInputTypeInField";
import {
LLMProviderFormProps,
LLMProviderName,
LLMProviderView,
ModelConfiguration,
} from "@/interfaces/llm";
import * as Yup from "yup";
import { useWellKnownLLMProvider } from "@/hooks/useLLMProviders";
import {
buildDefaultInitialValues,
buildDefaultValidationSchema,
buildAvailableModelConfigurations,
buildOnboardingInitialValues,
BaseLLMFormValues,
} from "@/sections/modals/llmConfig/utils";
import {
submitLLMProvider,
submitOnboardingProvider,
} from "@/sections/modals/llmConfig/svc";
import {
ModelsField,
DisplayNameField,
ModelsAccessField,
FieldSeparator,
FieldWrapper,
SingleDefaultModelField,
LLMConfigurationModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { fetchModels } from "@/app/admin/configuration/llm/utils";
import debounce from "lodash/debounce";
import { toast } from "@/hooks/useToast";
// Default base URL for a locally running LM Studio server.
const DEFAULT_API_BASE = "http://localhost:1234";
// Form state for the LM Studio provider modal. Extends the shared base form
// values with the server URL and an optional API key (LM Studio only needs a
// key when the server enforces authentication).
interface LMStudioFormValues extends BaseLLMFormValues {
  api_base: string;
  custom_config: {
    LM_STUDIO_API_KEY?: string;
  };
}
// Props for the inner form body. `fetchedModels`/`setFetchedModels` are lifted
// to the parent so the submit handler can reuse the latest fetched model list.
interface LMStudioFormInternalsProps {
  formikProps: FormikProps<LMStudioFormValues>;
  existingLlmProvider: LLMProviderView | undefined;
  fetchedModels: ModelConfiguration[];
  setFetchedModels: (models: ModelConfiguration[]) => void;
  isTesting: boolean;
  onClose: () => void;
  isOnboarding: boolean;
}
/**
 * Inner body of the LM Studio configuration modal.
 *
 * Debounce-fetches the model list from the server whenever the API base or
 * API key changes, and renders the connection, display-name, model and access
 * fields inside the shared modal wrapper.
 */
function LMStudioFormInternals({
  formikProps,
  existingLlmProvider,
  fetchedModels,
  setFetchedModels,
  isTesting,
  onClose,
  isOnboarding,
}: LMStudioFormInternalsProps) {
  // Key stored on the existing provider; used so the backend knows whether
  // the user actually changed the key (api_key_changed flag below).
  const initialApiKey =
    (existingLlmProvider?.custom_config?.LM_STUDIO_API_KEY as string) ?? "";
  const doFetchModels = useCallback(
    (apiBase: string, apiKey: string | undefined, signal: AbortSignal) => {
      fetchModels(
        LLMProviderName.LM_STUDIO,
        {
          api_base: apiBase,
          custom_config: apiKey ? { LM_STUDIO_API_KEY: apiKey } : {},
          api_key_changed: apiKey !== initialApiKey,
          name: existingLlmProvider?.name,
        },
        signal
      )
        .then((data) => {
          if (signal.aborted) return;
          if (data.error) {
            toast.error(data.error);
            setFetchedModels([]);
            return;
          }
          setFetchedModels(data.models);
        })
        // Bug fix: without a rejection handler a network/transport failure
        // surfaced as an unhandled promise rejection and the user got no
        // feedback. Mirror the data.error path (skip if the effect already
        // aborted the request).
        .catch((err) => {
          if (signal.aborted) return;
          toast.error(
            err instanceof Error ? err.message : "Failed to fetch models"
          );
          setFetchedModels([]);
        });
    },
    [existingLlmProvider?.name, initialApiKey, setFetchedModels]
  );
  // Debounce so we don't hit the server on every keystroke in the URL/key
  // fields; 500ms matches typical "stopped typing" latency.
  const debouncedFetchModels = useMemo(
    () => debounce(doFetchModels, 500),
    [doFetchModels]
  );
  const apiBase = formikProps.values.api_base;
  const apiKey = formikProps.values.custom_config?.LM_STUDIO_API_KEY;
  useEffect(() => {
    if (apiBase) {
      const controller = new AbortController();
      debouncedFetchModels(apiBase, apiKey, controller.signal);
      // Cancel both the pending debounce and any in-flight request when the
      // inputs change or the component unmounts.
      return () => {
        debouncedFetchModels.cancel();
        controller.abort();
      };
    } else {
      setFetchedModels([]);
    }
  }, [apiBase, apiKey, debouncedFetchModels, setFetchedModels]);
  // Prefer freshly fetched models; fall back to whatever the existing
  // provider already had configured.
  const currentModels =
    fetchedModels.length > 0
      ? fetchedModels
      : existingLlmProvider?.model_configurations || [];
  return (
    <LLMConfigurationModalWrapper
      providerEndpoint={LLMProviderName.LM_STUDIO}
      existingProviderName={existingLlmProvider?.name}
      onClose={onClose}
      isFormValid={formikProps.isValid}
      isDirty={formikProps.dirty}
      isTesting={isTesting}
      isSubmitting={formikProps.isSubmitting}
    >
      <FieldWrapper>
        <InputLayouts.Vertical
          name="api_base"
          title="API Base URL"
          subDescription="The base URL for your LM Studio server."
        >
          <InputTypeInField
            name="api_base"
            placeholder="Your LM Studio API base URL"
          />
        </InputLayouts.Vertical>
      </FieldWrapper>
      <FieldWrapper>
        <InputLayouts.Vertical
          name="custom_config.LM_STUDIO_API_KEY"
          title="API Key"
          subDescription="Optional API key if your LM Studio server requires authentication."
          suffix="optional"
        >
          <PasswordInputTypeInField
            name="custom_config.LM_STUDIO_API_KEY"
            placeholder="API Key"
          />
        </InputLayouts.Vertical>
      </FieldWrapper>
      {!isOnboarding && (
        <>
          <FieldSeparator />
          <DisplayNameField disabled={!!existingLlmProvider} />
        </>
      )}
      <FieldSeparator />
      {isOnboarding ? (
        <SingleDefaultModelField placeholder="E.g. llama3.1" />
      ) : (
        <ModelsField
          modelConfigurations={currentModels}
          formikProps={formikProps}
          recommendedDefaultModel={null}
          shouldShowAutoUpdateToggle={false}
        />
      )}
      {!isOnboarding && (
        <>
          <FieldSeparator />
          <ModelsAccessField formikProps={formikProps} />
        </>
      )}
    </LLMConfigurationModalWrapper>
  );
}
/**
 * LM Studio provider configuration form.
 *
 * Renders a Formik form in one of two variants: the onboarding flow (single
 * default model, minimal fields) or the full admin configuration modal.
 * On submit, empty custom-config entries are stripped and the request is
 * routed to either the onboarding or the standard provider submit helper.
 */
export default function LMStudioForm({
  variant = "llm-configuration",
  existingLlmProvider,
  shouldMarkAsDefault,
  onOpenChange,
  defaultModelName,
  onboardingState,
  onboardingActions,
  llmDescriptor,
}: LLMProviderFormProps) {
  // Models fetched from the live server; lifted here so onSubmit can use them.
  const [fetchedModels, setFetchedModels] = useState<ModelConfiguration[]>([]);
  const [isTesting, setIsTesting] = useState(false);
  const isOnboarding = variant === "onboarding";
  const { mutate } = useSWRConfig();
  const { wellKnownLLMProvider } = useWellKnownLLMProvider(
    LLMProviderName.LM_STUDIO
  );
  const onClose = () => onOpenChange?.(false);
  // Fall back to the descriptor passed by the caller when the well-known
  // provider metadata hasn't loaded (or doesn't exist).
  const modelConfigurations = buildAvailableModelConfigurations(
    existingLlmProvider,
    wellKnownLLMProvider ?? llmDescriptor
  );
  // Onboarding uses a trimmed value set; the admin modal seeds from the
  // existing provider (or defaults) so edits start from the saved state.
  const initialValues: LMStudioFormValues = isOnboarding
    ? ({
        ...buildOnboardingInitialValues(),
        name: LLMProviderName.LM_STUDIO,
        provider: LLMProviderName.LM_STUDIO,
        api_base: DEFAULT_API_BASE,
        default_model_name: "",
        custom_config: {
          LM_STUDIO_API_KEY: "",
        },
      } as LMStudioFormValues)
    : {
        ...buildDefaultInitialValues(
          existingLlmProvider,
          modelConfigurations,
          defaultModelName
        ),
        api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
        custom_config: {
          LM_STUDIO_API_KEY:
            (existingLlmProvider?.custom_config?.LM_STUDIO_API_KEY as string) ??
            "",
        },
      };
  const validationSchema = isOnboarding
    ? Yup.object().shape({
        api_base: Yup.string().required("API Base URL is required"),
        default_model_name: Yup.string().required("Model name is required"),
      })
    : buildDefaultValidationSchema().shape({
        api_base: Yup.string().required("API Base URL is required"),
      });
  return (
    <Formik
      initialValues={initialValues}
      validationSchema={validationSchema}
      validateOnMount={true}
      onSubmit={async (values, { setSubmitting }) => {
        // Drop empty custom-config values so an unset API key is omitted
        // entirely rather than sent as "".
        const filteredCustomConfig = Object.fromEntries(
          Object.entries(values.custom_config || {}).filter(([, v]) => v !== "")
        );
        const submitValues = {
          ...values,
          custom_config:
            Object.keys(filteredCustomConfig).length > 0
              ? filteredCustomConfig
              : undefined,
        };
        if (isOnboarding && onboardingState && onboardingActions) {
          // Onboarding path: only models actually fetched from the server
          // are submitted (empty list otherwise).
          const modelConfigsToUse =
            fetchedModels.length > 0 ? fetchedModels : [];
          await submitOnboardingProvider({
            providerName: LLMProviderName.LM_STUDIO,
            payload: {
              ...submitValues,
              model_configurations: modelConfigsToUse,
            },
            onboardingState,
            onboardingActions,
            isCustomProvider: false,
            onClose,
            setIsSubmitting: setSubmitting,
          });
        } else {
          await submitLLMProvider({
            providerName: LLMProviderName.LM_STUDIO,
            values: submitValues,
            initialValues,
            // Prefer live-fetched models; otherwise use the statically built
            // configurations from the provider descriptor.
            modelConfigurations:
              fetchedModels.length > 0 ? fetchedModels : modelConfigurations,
            existingLlmProvider,
            shouldMarkAsDefault,
            setIsTesting,
            mutate,
            onClose,
            setSubmitting,
          });
        }
      }}
    >
      {(formikProps) => (
        <LMStudioFormInternals
          formikProps={formikProps}
          existingLlmProvider={existingLlmProvider}
          fetchedModels={fetchedModels}
          setFetchedModels={setFetchedModels}
          isTesting={isTesting}
          onClose={onClose}
          isOnboarding={isOnboarding}
        />
      )}
    </Formik>
  );
}

View File

@@ -1,184 +0,0 @@
"use client";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
import * as InputLayouts from "@/layouts/input-layouts";
import {
LLMProviderFormProps,
LLMProviderName,
LLMProviderView,
} from "@/interfaces/llm";
import {
useInitialValues,
buildValidationSchema,
BaseLLMFormValues as BaseLLMModalValues,
} from "@/sections/modals/llmConfig/utils";
import { submitProvider } from "@/sections/modals/llmConfig/svc";
import { LLMProviderConfiguredSource } from "@/lib/analytics";
import {
APIKeyField,
APIBaseField,
ModelSelectionField,
DisplayNameField,
ModelAccessField,
ModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { fetchModels } from "@/lib/llmConfig/svc";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
// Default base URL for a locally running LM Studio server.
const DEFAULT_API_BASE = "http://localhost:1234";
// Form state for the LM Studio modal: base URL plus an optional API key kept
// under custom_config (only needed when the server enforces authentication).
interface LMStudioModalValues extends BaseLLMModalValues {
  api_base: string;
  custom_config: {
    LM_STUDIO_API_KEY?: string;
  };
}
// Props for the modal body; Formik state is read via context, not props.
interface LMStudioModalInternalsProps {
  existingLlmProvider: LLMProviderView | undefined;
  isOnboarding: boolean;
}
/**
 * Body of the LM Studio modal: connection fields plus model selection and
 * (outside onboarding) display-name and access controls. Reads Formik state
 * from context and refetches the model list on demand.
 */
function LMStudioModalInternals({
  existingLlmProvider,
  isOnboarding,
}: LMStudioModalInternalsProps) {
  const formik = useFormikContext<LMStudioModalValues>();
  // Refetch is only meaningful once a base URL has been entered.
  const canFetch = !!formik.values.api_base;
  const refreshModels = async () => {
    const currentKey = formik.values.custom_config?.LM_STUDIO_API_KEY;
    const savedKey = existingLlmProvider?.custom_config?.LM_STUDIO_API_KEY;
    // api_key_changed tells the backend whether to re-validate the key.
    const payload = {
      api_base: formik.values.api_base,
      custom_config: currentKey ? { LM_STUDIO_API_KEY: currentKey } : {},
      api_key_changed: currentKey !== savedKey,
      name: existingLlmProvider?.name,
    };
    const result = await fetchModels(LLMProviderName.LM_STUDIO, payload);
    if (result.error) throw new Error(result.error);
    formik.setFieldValue("model_configurations", result.models);
  };
  return (
    <>
      <APIBaseField
        subDescription="The base URL for your LM Studio server."
        placeholder="Your LM Studio API base URL"
      />
      <APIKeyField
        name="custom_config.LM_STUDIO_API_KEY"
        optional
        subDescription="Optional API key if your LM Studio server requires authentication."
      />
      {!isOnboarding && (
        <>
          <InputLayouts.FieldSeparator />
          <DisplayNameField disabled={!!existingLlmProvider} />
        </>
      )}
      <InputLayouts.FieldSeparator />
      <ModelSelectionField
        shouldShowAutoUpdateToggle={false}
        onRefetch={canFetch ? refreshModels : undefined}
      />
      {!isOnboarding && (
        <>
          <InputLayouts.FieldSeparator />
          <ModelAccessField />
        </>
      )}
    </>
  );
}
/**
 * LM Studio provider modal (onboarding or admin configuration variant).
 *
 * Seeds Formik from the existing provider (or defaults), strips empty
 * custom-config values on submit, and delegates persistence to
 * `submitProvider`, refreshing caches and toasting on success.
 */
export default function LMStudioModal({
  variant = "llm-configuration",
  existingLlmProvider,
  shouldMarkAsDefault,
  onOpenChange,
  onSuccess,
}: LLMProviderFormProps) {
  const isOnboarding = variant === "onboarding";
  const { mutate } = useSWRConfig();
  const onClose = () => onOpenChange?.(false);
  const initialValues: LMStudioModalValues = {
    ...useInitialValues(
      isOnboarding,
      LLMProviderName.LM_STUDIO,
      existingLlmProvider
    ),
    // Existing provider's base URL wins; otherwise the local default.
    api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
    custom_config: {
      LM_STUDIO_API_KEY: existingLlmProvider?.custom_config?.LM_STUDIO_API_KEY,
    },
  } as LMStudioModalValues;
  // api_base is the only provider-specific required field.
  const validationSchema = buildValidationSchema(isOnboarding, {
    apiBase: true,
  });
  return (
    <ModalWrapper
      providerName={LLMProviderName.LM_STUDIO}
      llmProvider={existingLlmProvider}
      onClose={onClose}
      initialValues={initialValues}
      validationSchema={validationSchema}
      onSubmit={async (values, { setSubmitting, setStatus }) => {
        // Omit empty custom-config values so an unset API key isn't sent
        // as an empty string.
        const filteredCustomConfig = Object.fromEntries(
          Object.entries(values.custom_config || {}).filter(([, v]) => v !== "")
        );
        const submitValues = {
          ...values,
          custom_config:
            Object.keys(filteredCustomConfig).length > 0
              ? filteredCustomConfig
              : undefined,
        };
        await submitProvider({
          analyticsSource: isOnboarding
            ? LLMProviderConfiguredSource.CHAT_ONBOARDING
            : LLMProviderConfiguredSource.ADMIN_PAGE,
          providerName: LLMProviderName.LM_STUDIO,
          values: submitValues,
          initialValues,
          existingLlmProvider,
          shouldMarkAsDefault,
          setStatus,
          setSubmitting,
          onClose,
          onSuccess: async () => {
            // Caller-provided success handler takes precedence; otherwise
            // refresh the SWR caches and show a default toast.
            if (onSuccess) {
              await onSuccess();
            } else {
              await refreshLlmProviderCaches(mutate);
              toast.success(
                existingLlmProvider
                  ? "Provider updated successfully!"
                  : "Provider enabled successfully!"
              );
            }
          },
        });
      }}
    >
      <LMStudioModalInternals
        existingLlmProvider={existingLlmProvider}
        isOnboarding={isOnboarding}
      />
    </ModalWrapper>
  );
}

View File

@@ -1,31 +1,41 @@
"use client";
import { useState, useEffect } from "react";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
import { Formik, FormikProps } from "formik";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
import * as InputLayouts from "@/layouts/input-layouts";
import {
LLMProviderFormProps,
LLMProviderName,
LLMProviderView,
ModelConfiguration,
} from "@/interfaces/llm";
import { fetchLiteLLMProxyModels } from "@/lib/llmConfig/svc";
import { fetchLiteLLMProxyModels } from "@/app/admin/configuration/llm/utils";
import * as Yup from "yup";
import { useWellKnownLLMProvider } from "@/hooks/useLLMProviders";
import {
useInitialValues,
buildValidationSchema,
buildDefaultInitialValues,
buildDefaultValidationSchema,
buildAvailableModelConfigurations,
buildOnboardingInitialValues,
BaseLLMFormValues,
} from "@/sections/modals/llmConfig/utils";
import { submitProvider } from "@/sections/modals/llmConfig/svc";
import { LLMProviderConfiguredSource } from "@/lib/analytics";
import {
submitLLMProvider,
submitOnboardingProvider,
} from "@/sections/modals/llmConfig/svc";
import {
APIKeyField,
APIBaseField,
ModelSelectionField,
ModelsField,
DisplayNameField,
ModelAccessField,
ModalWrapper,
ModelsAccessField,
FieldSeparator,
FieldWrapper,
SingleDefaultModelField,
LLMConfigurationModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
const DEFAULT_API_BASE = "http://localhost:4000";
@@ -35,15 +45,30 @@ interface LiteLLMProxyModalValues extends BaseLLMFormValues {
}
interface LiteLLMProxyModalInternalsProps {
formikProps: FormikProps<LiteLLMProxyModalValues>;
existingLlmProvider: LLMProviderView | undefined;
fetchedModels: ModelConfiguration[];
setFetchedModels: (models: ModelConfiguration[]) => void;
modelConfigurations: ModelConfiguration[];
isTesting: boolean;
onClose: () => void;
isOnboarding: boolean;
}
function LiteLLMProxyModalInternals({
formikProps,
existingLlmProvider,
fetchedModels,
setFetchedModels,
modelConfigurations,
isTesting,
onClose,
isOnboarding,
}: LiteLLMProxyModalInternalsProps) {
const formikProps = useFormikContext<LiteLLMProxyModalValues>();
const currentModels =
fetchedModels.length > 0
? fetchedModels
: existingLlmProvider?.model_configurations || modelConfigurations;
const isFetchDisabled =
!formikProps.values.api_base || !formikProps.values.api_key;
@@ -52,43 +77,79 @@ function LiteLLMProxyModalInternals({
const { models, error } = await fetchLiteLLMProxyModels({
api_base: formikProps.values.api_base,
api_key: formikProps.values.api_key,
provider_name: LLMProviderName.LITELLM_PROXY,
provider_name: existingLlmProvider?.name,
});
if (error) {
throw new Error(error);
}
formikProps.setFieldValue("model_configurations", models);
setFetchedModels(models);
};
// Auto-fetch models on initial load when editing an existing provider
useEffect(() => {
if (existingLlmProvider && !isFetchDisabled) {
handleFetchModels().catch((err) => {
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
return (
<>
<APIBaseField
subDescription="The base URL for your LiteLLM Proxy server."
placeholder="https://your-litellm-proxy.com"
/>
<LLMConfigurationModalWrapper
providerEndpoint={LLMProviderName.LITELLM_PROXY}
existingProviderName={existingLlmProvider?.name}
onClose={onClose}
isFormValid={formikProps.isValid}
isDirty={formikProps.dirty}
isTesting={isTesting}
isSubmitting={formikProps.isSubmitting}
>
<FieldWrapper>
<InputLayouts.Vertical
name="api_base"
title="API Base URL"
subDescription="The base URL for your LiteLLM Proxy server."
>
<InputTypeInField
name="api_base"
placeholder="https://your-litellm-proxy.com"
/>
</InputLayouts.Vertical>
</FieldWrapper>
<APIKeyField providerName="LiteLLM Proxy" />
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
)}
<InputLayouts.FieldSeparator />
<ModelSelectionField
shouldShowAutoUpdateToggle={false}
onRefetch={isFetchDisabled ? undefined : handleFetchModels}
/>
<FieldSeparator />
{isOnboarding ? (
<SingleDefaultModelField placeholder="E.g. gpt-4o" />
) : (
<ModelsField
modelConfigurations={currentModels}
formikProps={formikProps}
recommendedDefaultModel={null}
shouldShowAutoUpdateToggle={false}
onRefetch={isFetchDisabled ? undefined : handleFetchModels}
/>
)}
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<ModelAccessField />
<FieldSeparator />
<ModelsAccessField formikProps={formikProps} />
</>
)}
</>
</LLMConfigurationModalWrapper>
);
}
@@ -97,66 +158,107 @@ export default function LiteLLMProxyModal({
existingLlmProvider,
shouldMarkAsDefault,
onOpenChange,
onSuccess,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
}: LLMProviderFormProps) {
const [fetchedModels, setFetchedModels] = useState<ModelConfiguration[]>([]);
const [isTesting, setIsTesting] = useState(false);
const isOnboarding = variant === "onboarding";
const { mutate } = useSWRConfig();
const { wellKnownLLMProvider } = useWellKnownLLMProvider(
LLMProviderName.LITELLM_PROXY
);
const onClose = () => onOpenChange?.(false);
const initialValues: LiteLLMProxyModalValues = {
...useInitialValues(
isOnboarding,
LLMProviderName.LITELLM_PROXY,
existingLlmProvider
),
api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
} as LiteLLMProxyModalValues;
const modelConfigurations = buildAvailableModelConfigurations(
existingLlmProvider,
wellKnownLLMProvider ?? llmDescriptor
);
const validationSchema = buildValidationSchema(isOnboarding, {
apiKey: true,
apiBase: true,
});
const initialValues: LiteLLMProxyModalValues = isOnboarding
? ({
...buildOnboardingInitialValues(),
name: LLMProviderName.LITELLM_PROXY,
provider: LLMProviderName.LITELLM_PROXY,
api_key: "",
api_base: DEFAULT_API_BASE,
default_model_name: "",
} as LiteLLMProxyModalValues)
: {
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_key: existingLlmProvider?.api_key ?? "",
api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
};
const validationSchema = isOnboarding
? Yup.object().shape({
api_key: Yup.string().required("API Key is required"),
api_base: Yup.string().required("API Base URL is required"),
default_model_name: Yup.string().required("Model name is required"),
})
: buildDefaultValidationSchema().shape({
api_key: Yup.string().required("API Key is required"),
api_base: Yup.string().required("API Base URL is required"),
});
return (
<ModalWrapper
providerName={LLMProviderName.LITELLM_PROXY}
llmProvider={existingLlmProvider}
onClose={onClose}
<Formik
initialValues={initialValues}
validationSchema={validationSchema}
onSubmit={async (values, { setSubmitting, setStatus }) => {
await submitProvider({
analyticsSource: isOnboarding
? LLMProviderConfiguredSource.CHAT_ONBOARDING
: LLMProviderConfiguredSource.ADMIN_PAGE,
providerName: LLMProviderName.LITELLM_PROXY,
values,
initialValues,
existingLlmProvider,
shouldMarkAsDefault,
setStatus,
setSubmitting,
onClose,
onSuccess: async () => {
if (onSuccess) {
await onSuccess();
} else {
await refreshLlmProviderCaches(mutate);
toast.success(
existingLlmProvider
? "Provider updated successfully!"
: "Provider enabled successfully!"
);
}
},
});
validateOnMount={true}
onSubmit={async (values, { setSubmitting }) => {
if (isOnboarding && onboardingState && onboardingActions) {
const modelConfigsToUse =
fetchedModels.length > 0 ? fetchedModels : [];
await submitOnboardingProvider({
providerName: LLMProviderName.LITELLM_PROXY,
payload: {
...values,
model_configurations: modelConfigsToUse,
},
onboardingState,
onboardingActions,
isCustomProvider: false,
onClose,
setIsSubmitting: setSubmitting,
});
} else {
await submitLLMProvider({
providerName: LLMProviderName.LITELLM_PROXY,
values,
initialValues,
modelConfigurations:
fetchedModels.length > 0 ? fetchedModels : modelConfigurations,
existingLlmProvider,
shouldMarkAsDefault,
setIsTesting,
mutate,
onClose,
setSubmitting,
});
}
}}
>
<LiteLLMProxyModalInternals
existingLlmProvider={existingLlmProvider}
isOnboarding={isOnboarding}
/>
</ModalWrapper>
{(formikProps) => (
<LiteLLMProxyModalInternals
formikProps={formikProps}
existingLlmProvider={existingLlmProvider}
fetchedModels={fetchedModels}
setFetchedModels={setFetchedModels}
modelConfigurations={modelConfigurations}
isTesting={isTesting}
onClose={onClose}
isOnboarding={isOnboarding}
/>
)}
</Formik>
);
}

View File

@@ -1,43 +1,47 @@
"use client";
import * as Yup from "yup";
import { Dispatch, SetStateAction, useMemo, useState } from "react";
import { useEffect, useState } from "react";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
import { Formik, FormikProps } from "formik";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
import * as InputLayouts from "@/layouts/input-layouts";
import PasswordInputTypeInField from "@/refresh-components/form/PasswordInputTypeInField";
import {
LLMProviderFormProps,
LLMProviderName,
LLMProviderView,
ModelConfiguration,
} from "@/interfaces/llm";
import * as Yup from "yup";
import { useWellKnownLLMProvider } from "@/hooks/useLLMProviders";
import {
useInitialValues,
buildValidationSchema,
buildDefaultInitialValues,
buildDefaultValidationSchema,
buildAvailableModelConfigurations,
buildOnboardingInitialValues,
BaseLLMFormValues,
} from "@/sections/modals/llmConfig/utils";
import { submitProvider } from "@/sections/modals/llmConfig/svc";
import { LLMProviderConfiguredSource } from "@/lib/analytics";
import {
ModelSelectionField,
submitLLMProvider,
submitOnboardingProvider,
} from "@/sections/modals/llmConfig/svc";
import {
ModelsField,
DisplayNameField,
ModelAccessField,
ModalWrapper,
ModelsAccessField,
FieldSeparator,
SingleDefaultModelField,
LLMConfigurationModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { fetchOllamaModels } from "@/lib/llmConfig/svc";
import { fetchOllamaModels } from "@/app/admin/configuration/llm/utils";
import Tabs from "@/refresh-components/Tabs";
import { Card } from "@opal/components";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
const OLLAMA_PROVIDER_NAME = "ollama_chat";
const DEFAULT_API_BASE = "http://127.0.0.1:11434";
const CLOUD_API_BASE = "https://ollama.com";
enum Tab {
TAB_SELF_HOSTED = "self-hosted",
TAB_CLOUD = "cloud",
}
const TAB_SELF_HOSTED = "self-hosted";
const TAB_CLOUD = "cloud";
interface OllamaModalValues extends BaseLLMFormValues {
api_base: string;
@@ -47,28 +51,24 @@ interface OllamaModalValues extends BaseLLMFormValues {
}
interface OllamaModalInternalsProps {
formikProps: FormikProps<OllamaModalValues>;
existingLlmProvider: LLMProviderView | undefined;
fetchedModels: ModelConfiguration[];
setFetchedModels: (models: ModelConfiguration[]) => void;
isTesting: boolean;
onClose: () => void;
isOnboarding: boolean;
tab: Tab;
setTab: Dispatch<SetStateAction<Tab>>;
}
function OllamaModalInternals({
formikProps,
existingLlmProvider,
fetchedModels,
setFetchedModels,
isTesting,
onClose,
isOnboarding,
tab,
setTab,
}: OllamaModalInternalsProps) {
const formikProps = useFormikContext<OllamaModalValues>();
const isFetchDisabled = useMemo(
() =>
tab === Tab.TAB_SELF_HOSTED
? !formikProps.values.api_base
: !formikProps.values.custom_config.OLLAMA_API_KEY,
[tab, formikProps]
);
const handleFetchModels = async (signal?: AbortSignal) => {
// Only Ollama cloud accepts API key
const apiBase = formikProps.values.custom_config?.OLLAMA_API_KEY
@@ -83,20 +83,49 @@ function OllamaModalInternals({
if (error) {
throw new Error(error);
}
formikProps.setFieldValue("model_configurations", models);
setFetchedModels(models);
};
// Auto-fetch models on initial load when editing an existing provider
useEffect(() => {
if (existingLlmProvider) {
handleFetchModels().catch((err) => {
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const currentModels =
fetchedModels.length > 0
? fetchedModels
: existingLlmProvider?.model_configurations || [];
const hasApiKey = !!formikProps.values.custom_config?.OLLAMA_API_KEY;
const defaultTab =
existingLlmProvider && hasApiKey ? TAB_CLOUD : TAB_SELF_HOSTED;
return (
<>
<LLMConfigurationModalWrapper
providerEndpoint={OLLAMA_PROVIDER_NAME}
existingProviderName={existingLlmProvider?.name}
onClose={onClose}
isFormValid={formikProps.isValid}
isDirty={formikProps.dirty}
isTesting={isTesting}
isSubmitting={formikProps.isSubmitting}
>
<Card background="light" border="none" padding="sm">
<Tabs value={tab} onValueChange={(value) => setTab(value as Tab)}>
<Tabs defaultValue={defaultTab}>
<Tabs.List>
<Tabs.Trigger value={Tab.TAB_SELF_HOSTED}>
<Tabs.Trigger value={TAB_SELF_HOSTED}>
Self-hosted Ollama
</Tabs.Trigger>
<Tabs.Trigger value={Tab.TAB_CLOUD}>Ollama Cloud</Tabs.Trigger>
<Tabs.Trigger value={TAB_CLOUD}>Ollama Cloud</Tabs.Trigger>
</Tabs.List>
<Tabs.Content value={Tab.TAB_SELF_HOSTED} padding={0}>
<Tabs.Content value={TAB_SELF_HOSTED}>
<InputLayouts.Vertical
name="api_base"
title="API Base URL"
@@ -109,7 +138,7 @@ function OllamaModalInternals({
</InputLayouts.Vertical>
</Tabs.Content>
<Tabs.Content value={Tab.TAB_CLOUD}>
<Tabs.Content value={TAB_CLOUD}>
<InputLayouts.Vertical
name="custom_config.OLLAMA_API_KEY"
title="API Key"
@@ -126,24 +155,32 @@ function OllamaModalInternals({
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
)}
<InputLayouts.FieldSeparator />
<ModelSelectionField
shouldShowAutoUpdateToggle={false}
onRefetch={isFetchDisabled ? undefined : handleFetchModels}
/>
<FieldSeparator />
{isOnboarding ? (
<SingleDefaultModelField placeholder="E.g. llama3.1" />
) : (
<ModelsField
modelConfigurations={currentModels}
formikProps={formikProps}
recommendedDefaultModel={null}
shouldShowAutoUpdateToggle={false}
onRefetch={handleFetchModels}
/>
)}
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<ModelAccessField />
<FieldSeparator />
<ModelsAccessField formikProps={formikProps} />
</>
)}
</>
</LLMConfigurationModalWrapper>
);
}
@@ -152,53 +189,65 @@ export default function OllamaModal({
existingLlmProvider,
shouldMarkAsDefault,
onOpenChange,
onSuccess,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
}: LLMProviderFormProps) {
const [fetchedModels, setFetchedModels] = useState<ModelConfiguration[]>([]);
const [isTesting, setIsTesting] = useState(false);
const isOnboarding = variant === "onboarding";
const { mutate } = useSWRConfig();
const apiKey = existingLlmProvider?.custom_config?.OLLAMA_API_KEY;
const defaultTab =
existingLlmProvider && !!apiKey ? Tab.TAB_CLOUD : Tab.TAB_SELF_HOSTED;
const [tab, setTab] = useState<Tab>(defaultTab);
const { wellKnownLLMProvider } =
useWellKnownLLMProvider(OLLAMA_PROVIDER_NAME);
const onClose = () => onOpenChange?.(false);
const initialValues: OllamaModalValues = {
...useInitialValues(
isOnboarding,
LLMProviderName.OLLAMA_CHAT,
existingLlmProvider
),
api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
custom_config: {
OLLAMA_API_KEY: apiKey,
},
} as OllamaModalValues;
const validationSchema = useMemo(
() =>
buildValidationSchema(isOnboarding, {
apiBase: tab === Tab.TAB_SELF_HOSTED,
extra:
tab === Tab.TAB_CLOUD
? {
custom_config: Yup.object({
OLLAMA_API_KEY: Yup.string().required("API Key is required"),
}),
}
: undefined,
}),
[tab, isOnboarding]
const modelConfigurations = buildAvailableModelConfigurations(
existingLlmProvider,
wellKnownLLMProvider ?? llmDescriptor
);
const initialValues: OllamaModalValues = isOnboarding
? ({
...buildOnboardingInitialValues(),
name: OLLAMA_PROVIDER_NAME,
provider: OLLAMA_PROVIDER_NAME,
api_base: DEFAULT_API_BASE,
default_model_name: "",
custom_config: {
OLLAMA_API_KEY: "",
},
} as OllamaModalValues)
: {
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
custom_config: {
OLLAMA_API_KEY:
(existingLlmProvider?.custom_config?.OLLAMA_API_KEY as string) ??
"",
},
};
const validationSchema = isOnboarding
? Yup.object().shape({
api_base: Yup.string().required("API Base URL is required"),
default_model_name: Yup.string().required("Model name is required"),
})
: buildDefaultValidationSchema().shape({
api_base: Yup.string().required("API Base URL is required"),
});
return (
<ModalWrapper
providerName={LLMProviderName.OLLAMA_CHAT}
llmProvider={existingLlmProvider}
onClose={onClose}
<Formik
initialValues={initialValues}
validationSchema={validationSchema}
onSubmit={async (values, { setSubmitting, setStatus }) => {
validateOnMount={true}
onSubmit={async (values, { setSubmitting }) => {
const filteredCustomConfig = Object.fromEntries(
Object.entries(values.custom_config || {}).filter(([, v]) => v !== "")
);
@@ -214,39 +263,50 @@ export default function OllamaModal({
: undefined,
};
await submitProvider({
analyticsSource: isOnboarding
? LLMProviderConfiguredSource.CHAT_ONBOARDING
: LLMProviderConfiguredSource.ADMIN_PAGE,
providerName: LLMProviderName.OLLAMA_CHAT,
values: submitValues,
initialValues,
existingLlmProvider,
shouldMarkAsDefault,
setStatus,
setSubmitting,
onClose,
onSuccess: async () => {
if (onSuccess) {
await onSuccess();
} else {
await refreshLlmProviderCaches(mutate);
toast.success(
existingLlmProvider
? "Provider updated successfully!"
: "Provider enabled successfully!"
);
}
},
});
if (isOnboarding && onboardingState && onboardingActions) {
const modelConfigsToUse =
fetchedModels.length > 0 ? fetchedModels : [];
await submitOnboardingProvider({
providerName: OLLAMA_PROVIDER_NAME,
payload: {
...submitValues,
model_configurations: modelConfigsToUse,
},
onboardingState,
onboardingActions,
isCustomProvider: false,
onClose,
setIsSubmitting: setSubmitting,
});
} else {
await submitLLMProvider({
providerName: OLLAMA_PROVIDER_NAME,
values: submitValues,
initialValues,
modelConfigurations:
fetchedModels.length > 0 ? fetchedModels : modelConfigurations,
existingLlmProvider,
shouldMarkAsDefault,
setIsTesting,
mutate,
onClose,
setSubmitting,
});
}
}}
>
<OllamaModalInternals
existingLlmProvider={existingLlmProvider}
isOnboarding={isOnboarding}
tab={tab}
setTab={setTab}
/>
</ModalWrapper>
{(formikProps) => (
<OllamaModalInternals
formikProps={formikProps}
existingLlmProvider={existingLlmProvider}
fetchedModels={fetchedModels}
setFetchedModels={setFetchedModels}
isTesting={isTesting}
onClose={onClose}
isOnboarding={isOnboarding}
/>
)}
</Formik>
);
}

View File

@@ -1,32 +1,44 @@
"use client";
import { useState, useEffect } from "react";
import { markdown } from "@opal/utils";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
import { Formik, FormikProps } from "formik";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
import PasswordInputTypeInField from "@/refresh-components/form/PasswordInputTypeInField";
import * as InputLayouts from "@/layouts/input-layouts";
import {
LLMProviderFormProps,
LLMProviderName,
LLMProviderView,
ModelConfiguration,
} from "@/interfaces/llm";
import { fetchOpenAICompatibleModels } from "@/lib/llmConfig/svc";
import { fetchOpenAICompatibleModels } from "@/app/admin/configuration/llm/utils";
import * as Yup from "yup";
import { useWellKnownLLMProvider } from "@/hooks/useLLMProviders";
import {
useInitialValues,
buildValidationSchema,
buildDefaultInitialValues,
buildDefaultValidationSchema,
buildAvailableModelConfigurations,
buildOnboardingInitialValues,
BaseLLMFormValues,
} from "@/sections/modals/llmConfig/utils";
import { submitProvider } from "@/sections/modals/llmConfig/svc";
import { LLMProviderConfiguredSource } from "@/lib/analytics";
import {
APIBaseField,
APIKeyField,
ModelSelectionField,
submitLLMProvider,
submitOnboardingProvider,
} from "@/sections/modals/llmConfig/svc";
import {
ModelsField,
DisplayNameField,
ModelAccessField,
ModalWrapper,
ModelsAccessField,
FieldSeparator,
FieldWrapper,
LLMConfigurationModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
const OPENAI_COMPATIBLE_PROVIDER = LLMProviderName.OPENAI_COMPATIBLE;
const DEFAULT_API_BASE = "";
interface OpenAICompatibleModalValues extends BaseLLMFormValues {
api_key: string;
@@ -34,15 +46,30 @@ interface OpenAICompatibleModalValues extends BaseLLMFormValues {
}
interface OpenAICompatibleModalInternalsProps {
formikProps: FormikProps<OpenAICompatibleModalValues>;
existingLlmProvider: LLMProviderView | undefined;
fetchedModels: ModelConfiguration[];
setFetchedModels: (models: ModelConfiguration[]) => void;
modelConfigurations: ModelConfiguration[];
isTesting: boolean;
onClose: () => void;
isOnboarding: boolean;
}
function OpenAICompatibleModalInternals({
formikProps,
existingLlmProvider,
fetchedModels,
setFetchedModels,
modelConfigurations,
isTesting,
onClose,
isOnboarding,
}: OpenAICompatibleModalInternalsProps) {
const formikProps = useFormikContext<OpenAICompatibleModalValues>();
const currentModels =
fetchedModels.length > 0
? fetchedModels
: existingLlmProvider?.model_configurations || modelConfigurations;
const isFetchDisabled = !formikProps.values.api_base;
@@ -55,43 +82,81 @@ function OpenAICompatibleModalInternals({
if (error) {
throw new Error(error);
}
formikProps.setFieldValue("model_configurations", models);
setFetchedModels(models);
};
return (
<>
<APIBaseField
subDescription="The base URL of your OpenAI-compatible server."
placeholder="http://localhost:8000/v1"
/>
// Auto-fetch models on initial load when editing an existing provider
useEffect(() => {
if (existingLlmProvider && !isFetchDisabled) {
handleFetchModels().catch((err) => {
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
<APIKeyField
optional
subDescription={markdown(
"Provide an API key if your server requires authentication."
)}
/>
return (
<LLMConfigurationModalWrapper
providerEndpoint={LLMProviderName.OPENAI_COMPATIBLE}
existingProviderName={existingLlmProvider?.name}
onClose={onClose}
isFormValid={formikProps.isValid}
isDirty={formikProps.dirty}
isTesting={isTesting}
isSubmitting={formikProps.isSubmitting}
>
<FieldWrapper>
<InputLayouts.Vertical
name="api_base"
title="API Base URL"
subDescription="The base URL of your OpenAI-compatible server."
>
<InputTypeInField
name="api_base"
placeholder="http://localhost:8000/v1"
/>
</InputLayouts.Vertical>
</FieldWrapper>
<FieldWrapper>
<InputLayouts.Vertical
name="api_key"
title="API Key"
suffix="optional"
subDescription={markdown(
"Provide an API key if your server requires authentication."
)}
>
<PasswordInputTypeInField name="api_key" placeholder="API Key" />
</InputLayouts.Vertical>
</FieldWrapper>
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
)}
<InputLayouts.FieldSeparator />
<ModelSelectionField
<FieldSeparator />
<ModelsField
modelConfigurations={currentModels}
formikProps={formikProps}
recommendedDefaultModel={null}
shouldShowAutoUpdateToggle={false}
onRefetch={isFetchDisabled ? undefined : handleFetchModels}
/>
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<ModelAccessField />
<FieldSeparator />
<ModelsAccessField formikProps={formikProps} />
</>
)}
</>
</LLMConfigurationModalWrapper>
);
}
@@ -100,62 +165,100 @@ export default function OpenAICompatibleModal({
existingLlmProvider,
shouldMarkAsDefault,
onOpenChange,
onSuccess,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
}: LLMProviderFormProps) {
const [fetchedModels, setFetchedModels] = useState<ModelConfiguration[]>([]);
const [isTesting, setIsTesting] = useState(false);
const isOnboarding = variant === "onboarding";
const { mutate } = useSWRConfig();
const { wellKnownLLMProvider } = useWellKnownLLMProvider(
OPENAI_COMPATIBLE_PROVIDER
);
const onClose = () => onOpenChange?.(false);
const initialValues = useInitialValues(
isOnboarding,
LLMProviderName.OPENAI_COMPATIBLE,
existingLlmProvider
) as OpenAICompatibleModalValues;
const modelConfigurations = buildAvailableModelConfigurations(
existingLlmProvider,
wellKnownLLMProvider ?? llmDescriptor
);
const validationSchema = buildValidationSchema(isOnboarding, {
apiBase: true,
const initialValues: OpenAICompatibleModalValues = isOnboarding
? ({
...buildOnboardingInitialValues(),
name: OPENAI_COMPATIBLE_PROVIDER,
provider: OPENAI_COMPATIBLE_PROVIDER,
api_key: "",
api_base: DEFAULT_API_BASE,
default_model_name: "",
} as OpenAICompatibleModalValues)
: {
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_key: existingLlmProvider?.api_key ?? "",
api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
};
const validationSchema = buildDefaultValidationSchema().shape({
api_base: Yup.string().required("API Base URL is required"),
});
return (
<ModalWrapper
providerName={LLMProviderName.OPENAI_COMPATIBLE}
llmProvider={existingLlmProvider}
onClose={onClose}
<Formik
initialValues={initialValues}
validationSchema={validationSchema}
onSubmit={async (values, { setSubmitting, setStatus }) => {
await submitProvider({
analyticsSource: isOnboarding
? LLMProviderConfiguredSource.CHAT_ONBOARDING
: LLMProviderConfiguredSource.ADMIN_PAGE,
providerName: LLMProviderName.OPENAI_COMPATIBLE,
values,
initialValues,
existingLlmProvider,
shouldMarkAsDefault,
setStatus,
setSubmitting,
onClose,
onSuccess: async () => {
if (onSuccess) {
await onSuccess();
} else {
await refreshLlmProviderCaches(mutate);
toast.success(
existingLlmProvider
? "Provider updated successfully!"
: "Provider enabled successfully!"
);
}
},
});
validateOnMount={true}
onSubmit={async (values, { setSubmitting }) => {
if (isOnboarding && onboardingState && onboardingActions) {
const modelConfigsToUse =
fetchedModels.length > 0 ? fetchedModels : [];
await submitOnboardingProvider({
providerName: OPENAI_COMPATIBLE_PROVIDER,
payload: {
...values,
model_configurations: modelConfigsToUse,
},
onboardingState,
onboardingActions,
isCustomProvider: false,
onClose,
setIsSubmitting: setSubmitting,
});
} else {
await submitLLMProvider({
providerName: OPENAI_COMPATIBLE_PROVIDER,
values,
initialValues,
modelConfigurations:
fetchedModels.length > 0 ? fetchedModels : modelConfigurations,
existingLlmProvider,
shouldMarkAsDefault,
setIsTesting,
mutate,
onClose,
setSubmitting,
});
}
}}
>
<OpenAICompatibleModalInternals
existingLlmProvider={existingLlmProvider}
isOnboarding={isOnboarding}
/>
</ModalWrapper>
{(formikProps) => (
<OpenAICompatibleModalInternals
formikProps={formikProps}
existingLlmProvider={existingLlmProvider}
fetchedModels={fetchedModels}
setFetchedModels={setFetchedModels}
modelConfigurations={modelConfigurations}
isTesting={isTesting}
onClose={onClose}
isOnboarding={isOnboarding}
/>
)}
</Formik>
);
}

View File

@@ -1,99 +1,172 @@
"use client";
import { useState } from "react";
import { useSWRConfig } from "swr";
import { LLMProviderFormProps, LLMProviderName } from "@/interfaces/llm";
import { Formik } from "formik";
import { LLMProviderFormProps } from "@/interfaces/llm";
import * as Yup from "yup";
import { useWellKnownLLMProvider } from "@/hooks/useLLMProviders";
import {
useInitialValues,
buildValidationSchema,
buildDefaultInitialValues,
buildDefaultValidationSchema,
buildAvailableModelConfigurations,
buildOnboardingInitialValues,
} from "@/sections/modals/llmConfig/utils";
import { submitProvider } from "@/sections/modals/llmConfig/svc";
import { LLMProviderConfiguredSource } from "@/lib/analytics";
import {
submitLLMProvider,
submitOnboardingProvider,
} from "@/sections/modals/llmConfig/svc";
import {
APIKeyField,
ModelSelectionField,
ModelsField,
DisplayNameField,
ModelAccessField,
ModalWrapper,
FieldSeparator,
ModelsAccessField,
SingleDefaultModelField,
LLMConfigurationModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import * as InputLayouts from "@/layouts/input-layouts";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import { toast } from "@/hooks/useToast";
const OPENAI_PROVIDER_NAME = "openai";
const DEFAULT_DEFAULT_MODEL_NAME = "gpt-5.2";
export default function OpenAIModal({
variant = "llm-configuration",
existingLlmProvider,
shouldMarkAsDefault,
onOpenChange,
onSuccess,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
}: LLMProviderFormProps) {
const isOnboarding = variant === "onboarding";
const [isTesting, setIsTesting] = useState(false);
const { mutate } = useSWRConfig();
const { wellKnownLLMProvider } =
useWellKnownLLMProvider(OPENAI_PROVIDER_NAME);
const onClose = () => onOpenChange?.(false);
const initialValues = useInitialValues(
isOnboarding,
LLMProviderName.OPENAI,
existingLlmProvider
const modelConfigurations = buildAvailableModelConfigurations(
existingLlmProvider,
wellKnownLLMProvider ?? llmDescriptor
);
const validationSchema = buildValidationSchema(isOnboarding, {
apiKey: true,
});
const initialValues = isOnboarding
? {
...buildOnboardingInitialValues(),
name: OPENAI_PROVIDER_NAME,
provider: OPENAI_PROVIDER_NAME,
api_key: "",
default_model_name: DEFAULT_DEFAULT_MODEL_NAME,
}
: {
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_key: existingLlmProvider?.api_key ?? "",
default_model_name:
(defaultModelName &&
modelConfigurations.some((m) => m.name === defaultModelName)
? defaultModelName
: undefined) ??
wellKnownLLMProvider?.recommended_default_model?.name ??
DEFAULT_DEFAULT_MODEL_NAME,
is_auto_mode: existingLlmProvider?.is_auto_mode ?? true,
};
const validationSchema = isOnboarding
? Yup.object().shape({
api_key: Yup.string().required("API Key is required"),
default_model_name: Yup.string().required("Model name is required"),
})
: buildDefaultValidationSchema().shape({
api_key: Yup.string().required("API Key is required"),
});
return (
<ModalWrapper
providerName={LLMProviderName.OPENAI}
llmProvider={existingLlmProvider}
onClose={onClose}
<Formik
initialValues={initialValues}
validationSchema={validationSchema}
onSubmit={async (values, { setSubmitting, setStatus }) => {
await submitProvider({
analyticsSource: isOnboarding
? LLMProviderConfiguredSource.CHAT_ONBOARDING
: LLMProviderConfiguredSource.ADMIN_PAGE,
providerName: LLMProviderName.OPENAI,
values,
initialValues,
existingLlmProvider,
shouldMarkAsDefault,
setStatus,
setSubmitting,
onClose,
onSuccess: async () => {
if (onSuccess) {
await onSuccess();
} else {
await refreshLlmProviderCaches(mutate);
toast.success(
existingLlmProvider
? "Provider updated successfully!"
: "Provider enabled successfully!"
);
}
},
});
validateOnMount={true}
onSubmit={async (values, { setSubmitting }) => {
if (isOnboarding && onboardingState && onboardingActions) {
const modelConfigsToUse =
(wellKnownLLMProvider ?? llmDescriptor)?.known_models ?? [];
await submitOnboardingProvider({
providerName: OPENAI_PROVIDER_NAME,
payload: {
...values,
model_configurations: modelConfigsToUse,
is_auto_mode:
values.default_model_name === DEFAULT_DEFAULT_MODEL_NAME,
},
onboardingState,
onboardingActions,
isCustomProvider: false,
onClose,
setIsSubmitting: setSubmitting,
});
} else {
await submitLLMProvider({
providerName: OPENAI_PROVIDER_NAME,
values,
initialValues,
modelConfigurations,
existingLlmProvider,
shouldMarkAsDefault,
setIsTesting,
mutate,
onClose,
setSubmitting,
});
}
}}
>
<APIKeyField providerName="OpenAI" />
{(formikProps) => (
<LLMConfigurationModalWrapper
providerEndpoint={OPENAI_PROVIDER_NAME}
existingProviderName={existingLlmProvider?.name}
onClose={onClose}
isFormValid={formikProps.isValid}
isDirty={formikProps.dirty}
isTesting={isTesting}
isSubmitting={formikProps.isSubmitting}
>
<APIKeyField providerName="OpenAI" />
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
{!isOnboarding && (
<>
<FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
)}
<FieldSeparator />
{isOnboarding ? (
<SingleDefaultModelField placeholder="E.g. gpt-5.2" />
) : (
<ModelsField
modelConfigurations={modelConfigurations}
formikProps={formikProps}
recommendedDefaultModel={
wellKnownLLMProvider?.recommended_default_model ?? null
}
shouldShowAutoUpdateToggle={true}
/>
)}
{!isOnboarding && (
<>
<FieldSeparator />
<ModelsAccessField formikProps={formikProps} />
</>
)}
</LLMConfigurationModalWrapper>
)}
<InputLayouts.FieldSeparator />
<ModelSelectionField shouldShowAutoUpdateToggle={true} />
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<ModelAccessField />
</>
)}
</ModalWrapper>
</Formik>
);
}

View File

@@ -1,49 +1,73 @@
"use client";
import { useState, useEffect } from "react";
import { useSWRConfig } from "swr";
import { useFormikContext } from "formik";
import { Formik, FormikProps } from "formik";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
import * as InputLayouts from "@/layouts/input-layouts";
import {
LLMProviderFormProps,
LLMProviderName,
LLMProviderView,
ModelConfiguration,
} from "@/interfaces/llm";
import { fetchOpenRouterModels } from "@/lib/llmConfig/svc";
import { fetchOpenRouterModels } from "@/app/admin/configuration/llm/utils";
import * as Yup from "yup";
import { useWellKnownLLMProvider } from "@/hooks/useLLMProviders";
import {
useInitialValues,
buildValidationSchema,
buildDefaultInitialValues,
buildDefaultValidationSchema,
buildAvailableModelConfigurations,
buildOnboardingInitialValues,
BaseLLMFormValues,
} from "@/sections/modals/llmConfig/utils";
import { submitProvider } from "@/sections/modals/llmConfig/svc";
import { LLMProviderConfiguredSource } from "@/lib/analytics";
import {
submitLLMProvider,
submitOnboardingProvider,
} from "@/sections/modals/llmConfig/svc";
import {
APIKeyField,
APIBaseField,
ModelSelectionField,
ModelsField,
DisplayNameField,
ModelAccessField,
ModalWrapper,
ModelsAccessField,
FieldSeparator,
FieldWrapper,
SingleDefaultModelField,
LLMConfigurationModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { toast } from "@/hooks/useToast";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
const OPENROUTER_PROVIDER_NAME = "openrouter";
const DEFAULT_API_BASE = "https://openrouter.ai/api/v1";
interface OpenRouterModalValues extends BaseLLMFormValues {
api_key: string;
api_base: string;
}
interface OpenRouterModalInternalsProps {
formikProps: FormikProps<OpenRouterModalValues>;
existingLlmProvider: LLMProviderView | undefined;
fetchedModels: ModelConfiguration[];
setFetchedModels: (models: ModelConfiguration[]) => void;
modelConfigurations: ModelConfiguration[];
isTesting: boolean;
onClose: () => void;
isOnboarding: boolean;
}
function OpenRouterModalInternals({
formikProps,
existingLlmProvider,
fetchedModels,
setFetchedModels,
modelConfigurations,
isTesting,
onClose,
isOnboarding,
}: OpenRouterModalInternalsProps) {
const formikProps = useFormikContext<OpenRouterModalValues>();
const currentModels =
fetchedModels.length > 0
? fetchedModels
: existingLlmProvider?.model_configurations || modelConfigurations;
const isFetchDisabled =
!formikProps.values.api_base || !formikProps.values.api_key;
@@ -52,43 +76,79 @@ function OpenRouterModalInternals({
const { models, error } = await fetchOpenRouterModels({
api_base: formikProps.values.api_base,
api_key: formikProps.values.api_key,
provider_name: LLMProviderName.OPENROUTER,
provider_name: existingLlmProvider?.name,
});
if (error) {
throw new Error(error);
}
formikProps.setFieldValue("model_configurations", models);
setFetchedModels(models);
};
// Auto-fetch models on initial load when editing an existing provider
useEffect(() => {
if (existingLlmProvider && !isFetchDisabled) {
handleFetchModels().catch((err) => {
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
});
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
return (
<>
<APIBaseField
subDescription="Paste your OpenRouter-compatible endpoint URL or use OpenRouter API directly."
placeholder="Your OpenRouter base URL"
/>
<LLMConfigurationModalWrapper
providerEndpoint={OPENROUTER_PROVIDER_NAME}
existingProviderName={existingLlmProvider?.name}
onClose={onClose}
isFormValid={formikProps.isValid}
isDirty={formikProps.dirty}
isTesting={isTesting}
isSubmitting={formikProps.isSubmitting}
>
<FieldWrapper>
<InputLayouts.Vertical
name="api_base"
title="API Base URL"
subDescription="Paste your OpenRouter-compatible endpoint URL or use OpenRouter API directly."
>
<InputTypeInField
name="api_base"
placeholder="Your OpenRouter base URL"
/>
</InputLayouts.Vertical>
</FieldWrapper>
<APIKeyField providerName="OpenRouter" />
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
)}
<InputLayouts.FieldSeparator />
<ModelSelectionField
shouldShowAutoUpdateToggle={false}
onRefetch={isFetchDisabled ? undefined : handleFetchModels}
/>
<FieldSeparator />
{isOnboarding ? (
<SingleDefaultModelField placeholder="E.g. openai/gpt-4o" />
) : (
<ModelsField
modelConfigurations={currentModels}
formikProps={formikProps}
recommendedDefaultModel={null}
shouldShowAutoUpdateToggle={false}
onRefetch={isFetchDisabled ? undefined : handleFetchModels}
/>
)}
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<ModelAccessField />
<FieldSeparator />
<ModelsAccessField formikProps={formikProps} />
</>
)}
</>
</LLMConfigurationModalWrapper>
);
}
@@ -97,66 +157,107 @@ export default function OpenRouterModal({
existingLlmProvider,
shouldMarkAsDefault,
onOpenChange,
onSuccess,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
}: LLMProviderFormProps) {
const [fetchedModels, setFetchedModels] = useState<ModelConfiguration[]>([]);
const [isTesting, setIsTesting] = useState(false);
const isOnboarding = variant === "onboarding";
const { mutate } = useSWRConfig();
const { wellKnownLLMProvider } = useWellKnownLLMProvider(
OPENROUTER_PROVIDER_NAME
);
const onClose = () => onOpenChange?.(false);
const initialValues: OpenRouterModalValues = {
...useInitialValues(
isOnboarding,
LLMProviderName.OPENROUTER,
existingLlmProvider
),
api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
} as OpenRouterModalValues;
const modelConfigurations = buildAvailableModelConfigurations(
existingLlmProvider,
wellKnownLLMProvider ?? llmDescriptor
);
const validationSchema = buildValidationSchema(isOnboarding, {
apiKey: true,
apiBase: true,
});
const initialValues: OpenRouterModalValues = isOnboarding
? ({
...buildOnboardingInitialValues(),
name: OPENROUTER_PROVIDER_NAME,
provider: OPENROUTER_PROVIDER_NAME,
api_key: "",
api_base: DEFAULT_API_BASE,
default_model_name: "",
} as OpenRouterModalValues)
: {
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
api_key: existingLlmProvider?.api_key ?? "",
api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
};
const validationSchema = isOnboarding
? Yup.object().shape({
api_key: Yup.string().required("API Key is required"),
api_base: Yup.string().required("API Base URL is required"),
default_model_name: Yup.string().required("Model name is required"),
})
: buildDefaultValidationSchema().shape({
api_key: Yup.string().required("API Key is required"),
api_base: Yup.string().required("API Base URL is required"),
});
return (
<ModalWrapper
providerName={LLMProviderName.OPENROUTER}
llmProvider={existingLlmProvider}
onClose={onClose}
<Formik
initialValues={initialValues}
validationSchema={validationSchema}
onSubmit={async (values, { setSubmitting, setStatus }) => {
await submitProvider({
analyticsSource: isOnboarding
? LLMProviderConfiguredSource.CHAT_ONBOARDING
: LLMProviderConfiguredSource.ADMIN_PAGE,
providerName: LLMProviderName.OPENROUTER,
values,
initialValues,
existingLlmProvider,
shouldMarkAsDefault,
setStatus,
setSubmitting,
onClose,
onSuccess: async () => {
if (onSuccess) {
await onSuccess();
} else {
await refreshLlmProviderCaches(mutate);
toast.success(
existingLlmProvider
? "Provider updated successfully!"
: "Provider enabled successfully!"
);
}
},
});
validateOnMount={true}
onSubmit={async (values, { setSubmitting }) => {
if (isOnboarding && onboardingState && onboardingActions) {
const modelConfigsToUse =
fetchedModels.length > 0 ? fetchedModels : [];
await submitOnboardingProvider({
providerName: OPENROUTER_PROVIDER_NAME,
payload: {
...values,
model_configurations: modelConfigsToUse,
},
onboardingState,
onboardingActions,
isCustomProvider: false,
onClose,
setIsSubmitting: setSubmitting,
});
} else {
await submitLLMProvider({
providerName: OPENROUTER_PROVIDER_NAME,
values,
initialValues,
modelConfigurations:
fetchedModels.length > 0 ? fetchedModels : modelConfigurations,
existingLlmProvider,
shouldMarkAsDefault,
setIsTesting,
mutate,
onClose,
setSubmitting,
});
}
}}
>
<OpenRouterModalInternals
existingLlmProvider={existingLlmProvider}
isOnboarding={isOnboarding}
/>
</ModalWrapper>
{(formikProps) => (
<OpenRouterModalInternals
formikProps={formikProps}
existingLlmProvider={existingLlmProvider}
fetchedModels={fetchedModels}
setFetchedModels={setFetchedModels}
modelConfigurations={modelConfigurations}
isTesting={isTesting}
onClose={onClose}
isOnboarding={isOnboarding}
/>
)}
</Formik>
);
}

View File

@@ -1,27 +1,38 @@
"use client";
import { useState } from "react";
import { useSWRConfig } from "swr";
import { Formik } from "formik";
import { FileUploadFormField } from "@/components/Field";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
import * as InputLayouts from "@/layouts/input-layouts";
import { LLMProviderFormProps, LLMProviderName } from "@/interfaces/llm";
import { LLMProviderFormProps } from "@/interfaces/llm";
import * as Yup from "yup";
import { useWellKnownLLMProvider } from "@/hooks/useLLMProviders";
import {
useInitialValues,
buildValidationSchema,
buildDefaultInitialValues,
buildDefaultValidationSchema,
buildAvailableModelConfigurations,
buildOnboardingInitialValues,
BaseLLMFormValues,
} from "@/sections/modals/llmConfig/utils";
import { submitProvider } from "@/sections/modals/llmConfig/svc";
import { LLMProviderConfiguredSource } from "@/lib/analytics";
import {
ModelSelectionField,
submitLLMProvider,
submitOnboardingProvider,
} from "@/sections/modals/llmConfig/svc";
import {
ModelsField,
DisplayNameField,
ModelAccessField,
ModalWrapper,
FieldSeparator,
FieldWrapper,
ModelsAccessField,
SingleDefaultModelField,
LLMConfigurationModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import { toast } from "@/hooks/useToast";
const VERTEXAI_PROVIDER_NAME = "vertex_ai";
const VERTEXAI_DISPLAY_NAME = "Google Cloud Vertex AI";
const VERTEXAI_DEFAULT_MODEL = "gemini-2.5-pro";
const VERTEXAI_DEFAULT_LOCATION = "global";
interface VertexAIModalValues extends BaseLLMFormValues {
@@ -36,48 +47,85 @@ export default function VertexAIModal({
existingLlmProvider,
shouldMarkAsDefault,
onOpenChange,
onSuccess,
defaultModelName,
onboardingState,
onboardingActions,
llmDescriptor,
}: LLMProviderFormProps) {
const isOnboarding = variant === "onboarding";
const [isTesting, setIsTesting] = useState(false);
const { mutate } = useSWRConfig();
const { wellKnownLLMProvider } = useWellKnownLLMProvider(
VERTEXAI_PROVIDER_NAME
);
const onClose = () => onOpenChange?.(false);
const initialValues: VertexAIModalValues = {
...useInitialValues(
isOnboarding,
LLMProviderName.VERTEX_AI,
existingLlmProvider
),
custom_config: {
vertex_credentials:
(existingLlmProvider?.custom_config?.vertex_credentials as string) ??
"",
vertex_location:
(existingLlmProvider?.custom_config?.vertex_location as string) ??
VERTEXAI_DEFAULT_LOCATION,
},
} as VertexAIModalValues;
const modelConfigurations = buildAvailableModelConfigurations(
existingLlmProvider,
wellKnownLLMProvider ?? llmDescriptor
);
const validationSchema = buildValidationSchema(isOnboarding, {
extra: {
custom_config: Yup.object({
vertex_credentials: Yup.string().required(
"Credentials file is required"
const initialValues: VertexAIModalValues = isOnboarding
? ({
...buildOnboardingInitialValues(),
name: VERTEXAI_PROVIDER_NAME,
provider: VERTEXAI_PROVIDER_NAME,
default_model_name: VERTEXAI_DEFAULT_MODEL,
custom_config: {
vertex_credentials: "",
vertex_location: VERTEXAI_DEFAULT_LOCATION,
},
} as VertexAIModalValues)
: {
...buildDefaultInitialValues(
existingLlmProvider,
modelConfigurations,
defaultModelName
),
vertex_location: Yup.string(),
}),
},
});
default_model_name:
(defaultModelName &&
modelConfigurations.some((m) => m.name === defaultModelName)
? defaultModelName
: undefined) ??
wellKnownLLMProvider?.recommended_default_model?.name ??
VERTEXAI_DEFAULT_MODEL,
is_auto_mode: existingLlmProvider?.is_auto_mode ?? true,
custom_config: {
vertex_credentials:
(existingLlmProvider?.custom_config
?.vertex_credentials as string) ?? "",
vertex_location:
(existingLlmProvider?.custom_config?.vertex_location as string) ??
VERTEXAI_DEFAULT_LOCATION,
},
};
const validationSchema = isOnboarding
? Yup.object().shape({
default_model_name: Yup.string().required("Model name is required"),
custom_config: Yup.object({
vertex_credentials: Yup.string().required(
"Credentials file is required"
),
vertex_location: Yup.string(),
}),
})
: buildDefaultValidationSchema().shape({
custom_config: Yup.object({
vertex_credentials: Yup.string().required(
"Credentials file is required"
),
vertex_location: Yup.string(),
}),
});
return (
<ModalWrapper
providerName={LLMProviderName.VERTEX_AI}
llmProvider={existingLlmProvider}
onClose={onClose}
<Formik
initialValues={initialValues}
validationSchema={validationSchema}
onSubmit={async (values, { setSubmitting, setStatus }) => {
validateOnMount={true}
onSubmit={async (values, { setSubmitting }) => {
const filteredCustomConfig = Object.fromEntries(
Object.entries(values.custom_config || {}).filter(
([key, v]) => key === "vertex_credentials" || v !== ""
@@ -92,75 +140,101 @@ export default function VertexAIModal({
: undefined,
};
await submitProvider({
analyticsSource: isOnboarding
? LLMProviderConfiguredSource.CHAT_ONBOARDING
: LLMProviderConfiguredSource.ADMIN_PAGE,
providerName: LLMProviderName.VERTEX_AI,
values: submitValues,
initialValues,
existingLlmProvider,
shouldMarkAsDefault,
setStatus,
setSubmitting,
onClose,
onSuccess: async () => {
if (onSuccess) {
await onSuccess();
} else {
await refreshLlmProviderCaches(mutate);
toast.success(
existingLlmProvider
? "Provider updated successfully!"
: "Provider enabled successfully!"
);
}
},
});
if (isOnboarding && onboardingState && onboardingActions) {
const modelConfigsToUse =
(wellKnownLLMProvider ?? llmDescriptor)?.known_models ?? [];
await submitOnboardingProvider({
providerName: VERTEXAI_PROVIDER_NAME,
payload: {
...submitValues,
model_configurations: modelConfigsToUse,
is_auto_mode:
values.default_model_name === VERTEXAI_DEFAULT_MODEL,
},
onboardingState,
onboardingActions,
isCustomProvider: false,
onClose,
setIsSubmitting: setSubmitting,
});
} else {
await submitLLMProvider({
providerName: VERTEXAI_PROVIDER_NAME,
values: submitValues,
initialValues,
modelConfigurations,
existingLlmProvider,
shouldMarkAsDefault,
setIsTesting,
mutate,
onClose,
setSubmitting,
});
}
}}
>
<InputLayouts.FieldPadder>
<InputLayouts.Vertical
name="custom_config.vertex_location"
title="Google Cloud Region Name"
subDescription="Region where your Google Vertex AI models are hosted. See full list of regions supported at Google Cloud."
{(formikProps) => (
<LLMConfigurationModalWrapper
providerEndpoint={VERTEXAI_PROVIDER_NAME}
providerName={VERTEXAI_DISPLAY_NAME}
existingProviderName={existingLlmProvider?.name}
onClose={onClose}
isFormValid={formikProps.isValid}
isDirty={formikProps.dirty}
isTesting={isTesting}
isSubmitting={formikProps.isSubmitting}
>
<InputTypeInField
name="custom_config.vertex_location"
placeholder={VERTEXAI_DEFAULT_LOCATION}
/>
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
<FieldWrapper>
<InputLayouts.Vertical
name="custom_config.vertex_location"
title="Google Cloud Region Name"
subDescription="Region where your Google Vertex AI models are hosted. See full list of regions supported at Google Cloud."
>
<InputTypeInField
name="custom_config.vertex_location"
placeholder={VERTEXAI_DEFAULT_LOCATION}
/>
</InputLayouts.Vertical>
</FieldWrapper>
<InputLayouts.FieldPadder>
<InputLayouts.Vertical
name="custom_config.vertex_credentials"
title="API Key"
subDescription="Attach your API key JSON from Google Cloud to access your models."
>
<FileUploadFormField
name="custom_config.vertex_credentials"
label=""
/>
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
<FieldWrapper>
<InputLayouts.Vertical
name="custom_config.vertex_credentials"
title="API Key"
subDescription="Attach your API key JSON from Google Cloud to access your models."
>
<FileUploadFormField
name="custom_config.vertex_credentials"
label=""
/>
</InputLayouts.Vertical>
</FieldWrapper>
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<DisplayNameField disabled={!!existingLlmProvider} />
</>
<FieldSeparator />
{!isOnboarding && (
<DisplayNameField disabled={!!existingLlmProvider} />
)}
<FieldSeparator />
{isOnboarding ? (
<SingleDefaultModelField placeholder="E.g. gemini-2.5-pro" />
) : (
<ModelsField
modelConfigurations={modelConfigurations}
formikProps={formikProps}
recommendedDefaultModel={
wellKnownLLMProvider?.recommended_default_model ?? null
}
shouldShowAutoUpdateToggle={true}
/>
)}
{!isOnboarding && <ModelsAccessField formikProps={formikProps} />}
</LLMConfigurationModalWrapper>
)}
<InputLayouts.FieldSeparator />
<ModelSelectionField shouldShowAutoUpdateToggle={true} />
{!isOnboarding && (
<>
<InputLayouts.FieldSeparator />
<ModelAccessField />
</>
)}
</ModalWrapper>
</Formik>
);
}

View File

@@ -7,7 +7,7 @@ import VertexAIModal from "@/sections/modals/llmConfig/VertexAIModal";
import OpenRouterModal from "@/sections/modals/llmConfig/OpenRouterModal";
import CustomModal from "@/sections/modals/llmConfig/CustomModal";
import BedrockModal from "@/sections/modals/llmConfig/BedrockModal";
import LMStudioModal from "@/sections/modals/llmConfig/LMStudioModal";
import LMStudioForm from "@/sections/modals/llmConfig/LMStudioForm";
import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
import BifrostModal from "@/sections/modals/llmConfig/BifrostModal";
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
@@ -62,7 +62,7 @@ export function getModalForExistingProvider(
case LLMProviderName.BEDROCK:
return <BedrockModal {...props} />;
case LLMProviderName.LM_STUDIO:
return <LMStudioModal {...props} />;
return <LMStudioForm {...props} />;
case LLMProviderName.LITELLM_PROXY:
return <LiteLLMProxyModal {...props} />;
case LLMProviderName.BIFROST:

View File

@@ -1,14 +1,11 @@
"use client";
import React, { useEffect, useRef, useState } from "react";
import { Formik, Form, useFormikContext } from "formik";
import type { FormikConfig } from "formik";
import { cn } from "@/lib/utils";
import { Interactive } from "@opal/core";
import { ReactNode, useEffect, useRef, useState } from "react";
import { Form, FormikProps } from "formik";
import { usePaidEnterpriseFeaturesEnabled } from "@/components/settings/usePaidEnterpriseFeaturesEnabled";
import { useAgents } from "@/hooks/useAgents";
import { useUserGroups } from "@/lib/hooks";
import { LLMProviderView, ModelConfiguration } from "@/interfaces/llm";
import { ModelConfiguration, SimpleKnownModel } from "@/interfaces/llm";
import * as InputLayouts from "@/layouts/input-layouts";
import Checkbox from "@/refresh-components/inputs/Checkbox";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
@@ -18,14 +15,15 @@ import InputSelect from "@/refresh-components/inputs/InputSelect";
import PasswordInputTypeInField from "@/refresh-components/form/PasswordInputTypeInField";
import Switch from "@/refresh-components/inputs/Switch";
import Text from "@/refresh-components/texts/Text";
import { Button, LineItemButton } from "@opal/components";
import { Button, LineItemButton, Tag } from "@opal/components";
import { BaseLLMFormValues } from "@/sections/modals/llmConfig/utils";
import type { RichStr } from "@opal/types";
import { RichStr, WithoutStyles } from "@opal/types";
import Separator from "@/refresh-components/Separator";
import { Section } from "@/layouts/general-layouts";
import { Hoverable } from "@opal/core";
import { Content } from "@opal/layouts";
import {
SvgArrowExchange,
SvgChevronDown,
SvgOnyxOctagon,
SvgOrganization,
SvgPlusCircle,
@@ -50,14 +48,27 @@ import {
getProviderProductName,
} from "@/lib/llmConfig/providers";
export function FieldSeparator() {
return <Separator noPadding className="p-2" />;
}
export type FieldWrapperProps = WithoutStyles<
React.HTMLAttributes<HTMLDivElement>
>;
export function FieldWrapper(props: FieldWrapperProps) {
return <div {...props} className="p-2 w-full" />;
}
// ─── DisplayNameField ────────────────────────────────────────────────────────
export interface DisplayNameFieldProps {
disabled?: boolean;
}
export function DisplayNameField({ disabled = false }: DisplayNameFieldProps) {
return (
<InputLayouts.FieldPadder>
<FieldWrapper>
<InputLayouts.Vertical
name="name"
title="Display Name"
@@ -69,29 +80,27 @@ export function DisplayNameField({ disabled = false }: DisplayNameFieldProps) {
variant={disabled ? "disabled" : undefined}
/>
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
</FieldWrapper>
);
}
// ─── APIKeyField ─────────────────────────────────────────────────────────────
export interface APIKeyFieldProps {
/** Formik field name. @default "api_key" */
name?: string;
optional?: boolean;
providerName?: string;
subDescription?: string | RichStr;
}
export function APIKeyField({
name = "api_key",
optional = false,
providerName,
subDescription,
}: APIKeyFieldProps) {
return (
<InputLayouts.FieldPadder>
<FieldWrapper>
<InputLayouts.Vertical
name={name}
name="api_key"
title="API Key"
subDescription={
subDescription
@@ -102,35 +111,29 @@ export function APIKeyField({
}
suffix={optional ? "optional" : undefined}
>
<PasswordInputTypeInField name={name} />
<PasswordInputTypeInField name="api_key" />
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
</FieldWrapper>
);
}
// ─── APIBaseField ───────────────────────────────────────────────────────────
// ─── SingleDefaultModelField ─────────────────────────────────────────────────
export interface APIBaseFieldProps {
optional?: boolean;
subDescription?: string | RichStr;
export interface SingleDefaultModelFieldProps {
placeholder?: string;
}
export function APIBaseField({
optional = false,
subDescription,
placeholder = "https://",
}: APIBaseFieldProps) {
export function SingleDefaultModelField({
placeholder = "E.g. gpt-4o",
}: SingleDefaultModelFieldProps) {
return (
<InputLayouts.FieldPadder>
<InputLayouts.Vertical
name="api_base"
title="API Base URL"
subDescription={subDescription}
suffix={optional ? "optional" : undefined}
>
<InputTypeInField name="api_base" placeholder={placeholder} />
</InputLayouts.Vertical>
</InputLayouts.FieldPadder>
<InputLayouts.Vertical
name="default_model_name"
title="Default Model"
description="The model to use by default for this provider unless otherwise specified."
>
<InputTypeInField name="default_model_name" placeholder={placeholder} />
</InputLayouts.Vertical>
);
}
@@ -140,8 +143,13 @@ export function APIBaseField({
const GROUP_PREFIX = "group:";
const AGENT_PREFIX = "agent:";
export function ModelAccessField() {
const formikProps = useFormikContext<BaseLLMFormValues>();
interface ModelsAccessFieldProps<T> {
formikProps: FormikProps<T>;
}
export function ModelsAccessField<T extends BaseLLMFormValues>({
formikProps,
}: ModelsAccessFieldProps<T>) {
const { agents } = useAgents();
const { data: userGroups, isLoading: userGroupsIsLoading } = useUserGroups();
const { data: usersData } = useUsers({ includeApiKeys: false });
@@ -224,7 +232,7 @@ export function ModelAccessField() {
return (
<div className="flex flex-col w-full">
<InputLayouts.FieldPadder>
<FieldWrapper>
<InputLayouts.Horizontal
name="is_public"
title="Models Access"
@@ -245,7 +253,7 @@ export function ModelAccessField() {
</InputSelect.Content>
</InputSelect>
</InputLayouts.Horizontal>
</InputLayouts.FieldPadder>
</FieldWrapper>
{!isPublic && (
<Card background="light" border="none" padding="sm">
@@ -311,7 +319,7 @@ export function ModelAccessField() {
</div>
)}
<InputLayouts.FieldSeparator />
<FieldSeparator />
{selectedAgentIds.length > 0 ? (
<div className="grid grid-cols-2 gap-1 w-full">
@@ -370,12 +378,12 @@ export function ModelAccessField() {
* Manages an AbortController so that clicking the button cancels any
* in-flight fetch before starting a new one. Also aborts on unmount.
*/
interface RefetchButtonProps {
function RefetchButton({
onRefetch,
}: {
onRefetch: (signal: AbortSignal) => Promise<void> | void;
}
function RefetchButton({ onRefetch }: RefetchButtonProps) {
}) {
const abortRef = useRef<AbortController | null>(null);
const [isFetching, setIsFetching] = useState(false);
useEffect(() => {
return () => abortRef.current?.abort();
@@ -384,12 +392,11 @@ function RefetchButton({ onRefetch }: RefetchButtonProps) {
return (
<Button
prominence="tertiary"
icon={isFetching ? SimpleLoader : SvgRefreshCw}
icon={SvgRefreshCw}
onClick={async () => {
abortRef.current?.abort();
const controller = new AbortController();
abortRef.current = controller;
setIsFetching(true);
try {
await onRefetch(controller.signal);
} catch (err) {
@@ -397,88 +404,95 @@ function RefetchButton({ onRefetch }: RefetchButtonProps) {
toast.error(
err instanceof Error ? err.message : "Failed to fetch models"
);
} finally {
if (!controller.signal.aborted) {
setIsFetching(false);
}
}
}}
disabled={isFetching}
/>
);
}
// ─── ModelsField ─────────────────────────────────────────────────────
const FOLD_THRESHOLD = 3;
export interface ModelSelectionFieldProps {
export interface ModelsFieldProps<T> {
formikProps: FormikProps<T>;
modelConfigurations: ModelConfiguration[];
recommendedDefaultModel: SimpleKnownModel | null;
shouldShowAutoUpdateToggle: boolean;
onRefetch?: (signal: AbortSignal) => Promise<void> | void;
/** Called when the user adds a custom model by name. Enables the "Add Model" input. */
onAddModel?: (modelName: string) => void;
}
export function ModelSelectionField({
export function ModelsField<T extends BaseLLMFormValues>({
formikProps,
modelConfigurations,
recommendedDefaultModel,
shouldShowAutoUpdateToggle,
onRefetch,
onAddModel,
}: ModelSelectionFieldProps) {
const formikProps = useFormikContext<BaseLLMFormValues>();
}: ModelsFieldProps<T>) {
const [newModelName, setNewModelName] = useState("");
const [isExpanded, setIsExpanded] = useState(false);
// When the auto-update toggle is hidden, auto mode should have no effect —
// otherwise models can't be deselected and "Select All" stays disabled.
const isAutoMode =
shouldShowAutoUpdateToggle && formikProps.values.is_auto_mode;
const models = formikProps.values.model_configurations;
const isAutoMode = formikProps.values.is_auto_mode;
const selectedModels = formikProps.values.selected_model_names ?? [];
const defaultModel = formikProps.values.default_model_name;
// Snapshot the original model visibility so we can restore it when
// toggling auto mode back on.
const originalModelsRef = useRef(models);
useEffect(() => {
if (originalModelsRef.current.length === 0 && models.length > 0) {
originalModelsRef.current = models;
function handleCheckboxChange(modelName: string, checked: boolean) {
// Read current values inside the handler to avoid stale closure issues
const currentSelected = formikProps.values.selected_model_names ?? [];
const currentDefault = formikProps.values.default_model_name;
if (checked) {
const newSelected = [...currentSelected, modelName];
formikProps.setFieldValue("selected_model_names", newSelected);
// If this is the first model, set it as default
if (currentSelected.length === 0) {
formikProps.setFieldValue("default_model_name", modelName);
}
} else {
const newSelected = currentSelected.filter((name) => name !== modelName);
formikProps.setFieldValue("selected_model_names", newSelected);
// If removing the default, set the first remaining model as default
if (currentDefault === modelName && newSelected.length > 0) {
formikProps.setFieldValue("default_model_name", newSelected[0]);
} else if (newSelected.length === 0) {
formikProps.setFieldValue("default_model_name", undefined);
}
}
}, [models]);
}
// Automatically derive test_model_name from model_configurations.
// Any change to visibility or the model list syncs this automatically.
useEffect(() => {
const firstVisible = models.find((m) => m.is_visible)?.name;
if (firstVisible !== formikProps.values.test_model_name) {
formikProps.setFieldValue("test_model_name", firstVisible);
}
}, [models]); // eslint-disable-line react-hooks/exhaustive-deps
function setVisibility(modelName: string, visible: boolean) {
const updated = models.map((m) =>
m.name === modelName ? { ...m, is_visible: visible } : m
);
formikProps.setFieldValue("model_configurations", updated);
function handleSetDefault(modelName: string) {
formikProps.setFieldValue("default_model_name", modelName);
}
function handleToggleAutoMode(nextIsAutoMode: boolean) {
formikProps.setFieldValue("is_auto_mode", nextIsAutoMode);
if (nextIsAutoMode) {
formikProps.setFieldValue(
"model_configurations",
originalModelsRef.current
);
formikProps.setFieldValue(
"selected_model_names",
modelConfigurations.filter((m) => m.is_visible).map((m) => m.name)
);
formikProps.setFieldValue(
"default_model_name",
recommendedDefaultModel?.name ?? undefined
);
}
const allSelected =
modelConfigurations.length > 0 &&
modelConfigurations.every((m) => selectedModels.includes(m.name));
function handleToggleSelectAll() {
if (allSelected) {
formikProps.setFieldValue("selected_model_names", []);
formikProps.setFieldValue("default_model_name", undefined);
} else {
const allNames = modelConfigurations.map((m) => m.name);
formikProps.setFieldValue("selected_model_names", allNames);
if (!formikProps.values.default_model_name && allNames.length > 0) {
formikProps.setFieldValue("default_model_name", allNames[0]);
}
}
}
const allSelected = models.length > 0 && models.every((m) => m.is_visible);
function handleToggleSelectAll() {
const nextVisible = !allSelected;
const updated = models.map((m) => ({
...m,
is_visible: nextVisible,
}));
formikProps.setFieldValue("model_configurations", updated);
}
const visibleModels = models.filter((m) => m.is_visible);
const visibleModels = modelConfigurations.filter((m) => m.is_visible);
return (
<Card background="light" border="none" padding="sm">
@@ -491,83 +505,102 @@ export function ModelSelectionField({
>
<Section flexDirection="row" gap={0}>
<Button
disabled={isAutoMode || models.length === 0}
disabled={isAutoMode || modelConfigurations.length === 0}
prominence="tertiary"
size="md"
onClick={handleToggleSelectAll}
>
{allSelected ? "Deselect All" : "Select All"}
{allSelected ? "Unselect All" : "Select All"}
</Button>
{onRefetch && <RefetchButton onRefetch={onRefetch} />}
</Section>
</InputLayouts.Horizontal>
{models.length === 0 ? (
{modelConfigurations.length === 0 ? (
<EmptyMessageCard title="No models available." padding="sm" />
) : (
<Section gap={0.25}>
{(() => {
const displayModels = isAutoMode ? visibleModels : models;
const isFoldable = displayModels.length > FOLD_THRESHOLD;
const shownModels =
isFoldable && !isExpanded
? displayModels.slice(0, FOLD_THRESHOLD)
: displayModels;
{isAutoMode
? // Auto mode: read-only display
visibleModels.map((model) => (
<Hoverable.Root
key={model.name}
group="LLMConfigurationButton"
widthVariant="full"
>
<LineItemButton
variant="section"
sizePreset="main-ui"
selectVariant="select-heavy"
state="selected"
icon={() => <Checkbox checked />}
title={model.display_name || model.name}
rightChildren={
model.name === defaultModel ? (
<Section>
<Tag title="Default Model" color="blue" />
</Section>
) : undefined
}
/>
</Hoverable.Root>
))
: // Manual mode: checkbox selection
modelConfigurations.map((modelConfiguration) => {
const isSelected = selectedModels.includes(
modelConfiguration.name
);
const isDefault = defaultModel === modelConfiguration.name;
return (
<>
{shownModels.map((model) =>
isAutoMode ? (
return (
<Hoverable.Root
key={modelConfiguration.name}
group="LLMConfigurationButton"
widthVariant="full"
>
<LineItemButton
key={model.name}
variant="section"
sizePreset="main-ui"
selectVariant="select-heavy"
state="selected"
icon={() => <Checkbox checked />}
title={model.display_name || model.name}
/>
) : (
<LineItemButton
key={model.name}
variant="section"
sizePreset="main-ui"
selectVariant="select-heavy"
state={model.is_visible ? "selected" : "empty"}
icon={() => <Checkbox checked={model.is_visible} />}
title={model.name}
state={isSelected ? "selected" : "empty"}
icon={() => <Checkbox checked={isSelected} />}
title={modelConfiguration.name}
onClick={() =>
setVisibility(model.name, !model.is_visible)
handleCheckboxChange(
modelConfiguration.name,
!isSelected
)
}
rightChildren={
isSelected ? (
isDefault ? (
<Section>
<Tag color="blue" title="Default Model" />
</Section>
) : (
<Hoverable.Item
group="LLMConfigurationButton"
variant="opacity-on-hover"
>
<Button
size="sm"
prominence="internal"
onClick={(e) => {
e.stopPropagation();
handleSetDefault(modelConfiguration.name);
}}
type="button"
>
Set as default
</Button>
</Hoverable.Item>
)
) : undefined
}
/>
)
)}
{isFoldable && (
<Interactive.Stateless
prominence="tertiary"
onClick={() => setIsExpanded(!isExpanded)}
>
<Interactive.Container type="button" widthVariant="full">
<Content
sizePreset="secondary"
variant="body"
title={isExpanded ? "Fold Models" : "More Models"}
icon={() => (
<SvgChevronDown
className={cn(
"transition-transform",
isExpanded && "-rotate-180"
)}
size={14}
/>
)}
/>
</Interactive.Container>
</Interactive.Stateless>
)}
</>
);
})()}
</Hoverable.Root>
);
})}
</Section>
)}
@@ -582,7 +615,7 @@ export function ModelSelectionField({
if (e.key === "Enter" && newModelName.trim()) {
e.preventDefault();
const trimmed = newModelName.trim();
if (!models.some((m) => m.name === trimmed)) {
if (!modelConfigurations.some((m) => m.name === trimmed)) {
onAddModel(trimmed);
setNewModelName("");
}
@@ -597,11 +630,14 @@ export function ModelSelectionField({
type="button"
disabled={
!newModelName.trim() ||
models.some((m) => m.name === newModelName.trim())
modelConfigurations.some((m) => m.name === newModelName.trim())
}
onClick={() => {
const trimmed = newModelName.trim();
if (trimmed && !models.some((m) => m.name === trimmed)) {
if (
trimmed &&
!modelConfigurations.some((m) => m.name === trimmed)
) {
onAddModel(trimmed);
setNewModelName("");
}
@@ -628,96 +664,41 @@ export function ModelSelectionField({
);
}
// ─── ModalWrapper ─────────────────────────────────────────────────────
// ============================================================================
// LLMConfigurationModalWrapper
// ============================================================================
export interface ModalWrapperProps<
T extends BaseLLMFormValues = BaseLLMFormValues,
> {
providerName: string;
llmProvider?: LLMProviderView;
interface LLMConfigurationModalWrapperProps {
providerEndpoint: string;
providerName?: string;
existingProviderName?: string;
onClose: () => void;
initialValues: T;
validationSchema: FormikConfig<T>["validationSchema"];
onSubmit: FormikConfig<T>["onSubmit"];
children: React.ReactNode;
isFormValid: boolean;
isDirty?: boolean;
isTesting?: boolean;
isSubmitting?: boolean;
children: ReactNode;
}
export function ModalWrapper<T extends BaseLLMFormValues = BaseLLMFormValues>({
export function LLMConfigurationModalWrapper({
providerEndpoint,
providerName,
llmProvider,
existingProviderName,
onClose,
initialValues,
validationSchema,
onSubmit,
isFormValid,
isDirty,
isTesting,
isSubmitting,
children,
}: ModalWrapperProps<T>) {
return (
<Formik
initialValues={initialValues}
validationSchema={validationSchema}
validateOnMount
onSubmit={onSubmit}
>
{() => (
<ModalWrapperInner
providerName={providerName}
llmProvider={llmProvider}
onClose={onClose}
modelConfigurations={initialValues.model_configurations}
>
{children}
</ModalWrapperInner>
)}
</Formik>
);
}
interface ModalWrapperInnerProps {
providerName: string;
llmProvider?: LLMProviderView;
onClose: () => void;
modelConfigurations?: ModelConfiguration[];
children: React.ReactNode;
}
function ModalWrapperInner({
providerName,
llmProvider,
onClose,
modelConfigurations,
children,
}: ModalWrapperInnerProps) {
const { isValid, dirty, isSubmitting, status, setFieldValue, values } =
useFormikContext<BaseLLMFormValues>();
// When SWR resolves after mount, populate model_configurations if still
// empty. test_model_name is then derived automatically by
// ModelSelectionField's useEffect.
useEffect(() => {
if (
modelConfigurations &&
modelConfigurations.length > 0 &&
values.model_configurations.length === 0
) {
setFieldValue("model_configurations", modelConfigurations);
}
}, [modelConfigurations]); // eslint-disable-line react-hooks/exhaustive-deps
const isTesting = status?.isTesting === true;
}: LLMConfigurationModalWrapperProps) {
const busy = isTesting || isSubmitting;
const providerIcon = getProviderIcon(providerEndpoint);
const providerDisplayName =
providerName ?? getProviderDisplayName(providerEndpoint);
const providerProductName = getProviderProductName(providerEndpoint);
const disabledTooltip = busy
? undefined
: !isValid
? "Please fill in all required fields."
: !dirty
? "No changes to save."
: undefined;
const providerIcon = getProviderIcon(providerName);
const providerDisplayName = getProviderDisplayName(providerName);
const providerProductName = getProviderProductName(providerName);
const title = llmProvider
? `Configure "${llmProvider.name}"`
const title = existingProviderName
? `Configure "${existingProviderName}"`
: `Set up ${providerProductName}`;
const description = `Connect to ${providerDisplayName} and set up your ${providerProductName} models.`;
@@ -741,12 +722,13 @@ function ModalWrapperInner({
Cancel
</Button>
<Button
disabled={!isValid || !dirty || busy}
disabled={
!isFormValid || busy || (!!existingProviderName && !isDirty)
}
type="submit"
icon={busy ? SimpleLoader : undefined}
tooltip={disabledTooltip}
>
{llmProvider?.name
{existingProviderName
? busy
? "Updating"
: "Update"

View File

@@ -1,8 +1,13 @@
import { LLMProviderName, LLMProviderView } from "@/interfaces/llm";
import {
LLMProviderName,
LLMProviderView,
ModelConfiguration,
} from "@/interfaces/llm";
import {
LLM_ADMIN_URL,
LLM_PROVIDERS_ADMIN_URL,
} from "@/lib/llmConfig/constants";
import { refreshLlmProviderCaches } from "@/lib/llmConfig/cache";
import { toast } from "@/hooks/useToast";
import isEqual from "lodash/isEqual";
import { parseAzureTargetUri } from "@/lib/azureTargetUri";
@@ -13,11 +18,13 @@ import {
} from "@/lib/analytics";
import {
BaseLLMFormValues,
SubmitLLMProviderParams,
SubmitOnboardingProviderParams,
TestApiKeyResult,
filterModelConfigurations,
getAutoModeModelConfigurations,
} from "@/sections/modals/llmConfig/utils";
// ─── Test helpers ─────────────────────────────────────────────────────────
const submitLlmTestRequest = async (
payload: Record<string, unknown>,
fallbackErrorMessage: string
@@ -43,6 +50,162 @@ const submitLlmTestRequest = async (
}
};
export const submitLLMProvider = async <T extends BaseLLMFormValues>({
providerName,
values,
initialValues,
modelConfigurations,
existingLlmProvider,
shouldMarkAsDefault,
hideSuccess,
setIsTesting,
mutate,
onClose,
setSubmitting,
}: SubmitLLMProviderParams<T>): Promise<void> => {
setSubmitting(true);
const { selected_model_names: visibleModels, api_key, ...rest } = values;
// In auto mode, use recommended models from descriptor
// In manual mode, use user's selection
let filteredModelConfigurations: ModelConfiguration[];
let finalDefaultModelName =
rest.default_model_name || modelConfigurations[0]?.name || "";
if (values.is_auto_mode) {
filteredModelConfigurations =
getAutoModeModelConfigurations(modelConfigurations);
// In auto mode, use the first recommended model as default if current default isn't in the list
const visibleModelNames = new Set(
filteredModelConfigurations.map((m) => m.name)
);
if (
finalDefaultModelName &&
!visibleModelNames.has(finalDefaultModelName)
) {
finalDefaultModelName = filteredModelConfigurations[0]?.name ?? "";
}
} else {
filteredModelConfigurations = filterModelConfigurations(
modelConfigurations,
visibleModels,
rest.default_model_name as string | undefined
);
}
const customConfigChanged = !isEqual(
values.custom_config,
initialValues.custom_config
);
const normalizedApiBase =
typeof rest.api_base === "string" && rest.api_base.trim() === ""
? undefined
: rest.api_base;
const finalValues = {
...rest,
api_base: normalizedApiBase,
default_model_name: finalDefaultModelName,
api_key,
api_key_changed: api_key !== (initialValues.api_key as string | undefined),
custom_config_changed: customConfigChanged,
model_configurations: filteredModelConfigurations,
};
// Test the configuration
if (!isEqual(finalValues, initialValues)) {
setIsTesting(true);
const response = await fetch("/api/admin/llm/test", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
provider: providerName,
...finalValues,
model: finalDefaultModelName,
id: existingLlmProvider?.id,
}),
});
setIsTesting(false);
if (!response.ok) {
const errorMsg = (await response.json()).detail;
toast.error(errorMsg);
setSubmitting(false);
return;
}
}
const response = await fetch(
`${LLM_PROVIDERS_ADMIN_URL}${
existingLlmProvider ? "" : "?is_creation=true"
}`,
{
method: "PUT",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
provider: providerName,
...finalValues,
id: existingLlmProvider?.id,
}),
}
);
if (!response.ok) {
const errorMsg = (await response.json()).detail;
const fullErrorMsg = existingLlmProvider
? `Failed to update provider: ${errorMsg}`
: `Failed to enable provider: ${errorMsg}`;
toast.error(fullErrorMsg);
return;
}
if (shouldMarkAsDefault) {
const newLlmProvider = (await response.json()) as LLMProviderView;
const setDefaultResponse = await fetch(`${LLM_ADMIN_URL}/default`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
provider_id: newLlmProvider.id,
model_name: finalDefaultModelName,
}),
});
if (!setDefaultResponse.ok) {
const errorMsg = (await setDefaultResponse.json()).detail;
toast.error(`Failed to set provider as default: ${errorMsg}`);
return;
}
}
await refreshLlmProviderCaches(mutate);
onClose();
if (!hideSuccess) {
const successMsg = existingLlmProvider
? "Provider updated successfully!"
: "Provider enabled successfully!";
toast.success(successMsg);
}
const knownProviders = new Set<string>(Object.values(LLMProviderName));
track(AnalyticsEvent.CONFIGURED_LLM_PROVIDER, {
provider: knownProviders.has(providerName) ? providerName : "custom",
is_creation: !existingLlmProvider,
source: LLMProviderConfiguredSource.ADMIN_PAGE,
});
setSubmitting(false);
};
export const testApiKeyHelper = async (
providerName: string,
formValues: Record<string, unknown>,
@@ -79,7 +242,7 @@ export const testApiKeyHelper = async (
...((formValues?.custom_config as Record<string, unknown>) ?? {}),
...(customConfigOverride ?? {}),
},
model: modelName ?? (formValues?.test_model_name as string) ?? "",
model: modelName ?? (formValues?.default_model_name as string) ?? "",
};
return await submitLlmTestRequest(
@@ -97,148 +260,96 @@ export const testCustomProvider = async (
);
};
// ─── Submit provider ──────────────────────────────────────────────────────
export interface SubmitProviderParams<
T extends BaseLLMFormValues = BaseLLMFormValues,
> {
providerName: string;
values: T;
initialValues: T;
existingLlmProvider?: LLMProviderView;
shouldMarkAsDefault?: boolean;
isCustomProvider?: boolean;
setStatus: (status: Record<string, unknown>) => void;
setSubmitting: (submitting: boolean) => void;
onClose: () => void;
/** Called after successful create/update + set-default. Use for cache refresh, state updates, toasts, etc. */
onSuccess?: () => void | Promise<void>;
/** Analytics source for tracking. @default LLMProviderConfiguredSource.ADMIN_PAGE */
analyticsSource?: LLMProviderConfiguredSource;
}
export async function submitProvider<T extends BaseLLMFormValues>({
export const submitOnboardingProvider = async ({
providerName,
values,
initialValues,
existingLlmProvider,
shouldMarkAsDefault,
payload,
onboardingState,
onboardingActions,
isCustomProvider,
setStatus,
setSubmitting,
onClose,
onSuccess,
analyticsSource = LLMProviderConfiguredSource.ADMIN_PAGE,
}: SubmitProviderParams<T>): Promise<void> {
setSubmitting(true);
setIsSubmitting,
}: SubmitOnboardingProviderParams): Promise<void> => {
setIsSubmitting(true);
const { test_model_name, api_key, ...rest } = values;
const testModelName =
test_model_name ||
values.model_configurations.find((m) => m.is_visible)?.name ||
"";
// ── Test credentials ────────────────────────────────────────────────
const customConfigChanged = !isEqual(
values.custom_config,
initialValues.custom_config
);
const normalizedApiBase =
typeof rest.api_base === "string" && rest.api_base.trim() === ""
? undefined
: rest.api_base;
const finalValues = {
...rest,
api_base: normalizedApiBase,
api_key,
api_key_changed: api_key !== (initialValues.api_key as string | undefined),
custom_config_changed: customConfigChanged,
};
if (!isEqual(finalValues, initialValues)) {
setStatus({ isTesting: true });
const testResult = await submitLlmTestRequest(
{
provider: providerName,
...finalValues,
model: testModelName,
id: existingLlmProvider?.id,
},
"An error occurred while testing the provider."
);
setStatus({ isTesting: false });
if (!testResult.ok) {
toast.error(testResult.errorMessage);
setSubmitting(false);
return;
}
// Test credentials
let result: TestApiKeyResult;
if (isCustomProvider) {
result = await testCustomProvider(payload);
} else {
result = await testApiKeyHelper(providerName, payload);
}
// ── Create/update provider ──────────────────────────────────────────
const response = await fetch(
`${LLM_PROVIDERS_ADMIN_URL}${
existingLlmProvider ? "" : "?is_creation=true"
}`,
{
method: "PUT",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
provider: providerName,
...finalValues,
id: existingLlmProvider?.id,
}),
}
);
if (!response.ok) {
const errorMsg = (await response.json()).detail;
const fullErrorMsg = existingLlmProvider
? `Failed to update provider: ${errorMsg}`
: `Failed to enable provider: ${errorMsg}`;
toast.error(fullErrorMsg);
setSubmitting(false);
if (!result.ok) {
toast.error(result.errorMessage);
setIsSubmitting(false);
return;
}
// ── Set as default ──────────────────────────────────────────────────
if (shouldMarkAsDefault && testModelName) {
// Create provider
const response = await fetch(`${LLM_PROVIDERS_ADMIN_URL}?is_creation=true`, {
method: "PUT",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(payload),
});
if (!response.ok) {
const errorMsg = (await response.json()).detail;
toast.error(errorMsg);
setIsSubmitting(false);
return;
}
// Set as default if first provider
if (
onboardingState?.data?.llmProviders == null ||
onboardingState.data.llmProviders.length === 0
) {
try {
const newLlmProvider = await response.json();
if (newLlmProvider?.id != null) {
const setDefaultResponse = await fetch(`${LLM_ADMIN_URL}/default`, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
provider_id: newLlmProvider.id,
model_name: testModelName,
}),
});
if (!setDefaultResponse.ok) {
const err = await setDefaultResponse.json().catch(() => ({}));
toast.error(err?.detail ?? "Failed to set provider as default");
setSubmitting(false);
return;
const defaultModelName =
(payload as Record<string, string>).default_model_name ??
(payload as Record<string, ModelConfiguration[]>)
.model_configurations?.[0]?.name ??
"";
if (defaultModelName) {
const setDefaultResponse = await fetch(`${LLM_ADMIN_URL}/default`, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
provider_id: newLlmProvider.id,
model_name: defaultModelName,
}),
});
if (!setDefaultResponse.ok) {
const err = await setDefaultResponse.json().catch(() => ({}));
toast.error(err?.detail ?? "Failed to set provider as default");
setIsSubmitting(false);
return;
}
}
}
} catch {
} catch (_e) {
toast.error("Failed to set new provider as default");
}
}
// ── Post-success ────────────────────────────────────────────────────
const knownProviders = new Set<string>(Object.values(LLMProviderName));
track(AnalyticsEvent.CONFIGURED_LLM_PROVIDER, {
provider: knownProviders.has(providerName) ? providerName : "custom",
is_creation: !existingLlmProvider,
source: analyticsSource,
provider: isCustomProvider ? "custom" : providerName,
is_creation: true,
source: LLMProviderConfiguredSource.CHAT_ONBOARDING,
});
if (onSuccess) await onSuccess();
// Update onboarding state
onboardingActions.updateData({
llmProviders: [
...(onboardingState?.data.llmProviders ?? []),
isCustomProvider ? "custom" : providerName,
],
});
onboardingActions.setButtonActive(true);
setSubmitting(false);
setIsSubmitting(false);
onClose();
}
};

View File

@@ -1,130 +1,197 @@
import {
LLMProviderName,
LLMProviderView,
ModelConfiguration,
WellKnownLLMProviderDescriptor,
} from "@/interfaces/llm";
import * as Yup from "yup";
import { useWellKnownLLMProvider } from "@/hooks/useLLMProviders";
import { ScopedMutator } from "swr";
import { OnboardingActions, OnboardingState } from "@/interfaces/onboarding";
// ─── useInitialValues ─────────────────────────────────────────────────────
// Common class names for the Form component across all LLM provider forms
export const LLM_FORM_CLASS_NAME = "flex flex-col gap-y-4 items-stretch mt-6";
/** Builds the merged model list from existing + well-known, deduped by name. */
function buildModelConfigurations(
export const buildDefaultInitialValues = (
existingLlmProvider?: LLMProviderView,
wellKnownLLMProvider?: WellKnownLLMProviderDescriptor
): ModelConfiguration[] {
const existingModels = existingLlmProvider?.model_configurations ?? [];
const wellKnownModels = wellKnownLLMProvider?.known_models ?? [];
modelConfigurations?: ModelConfiguration[],
currentDefaultModelName?: string
) => {
const defaultModelName =
(currentDefaultModelName &&
existingLlmProvider?.model_configurations?.some(
(m) => m.name === currentDefaultModelName
)
? currentDefaultModelName
: undefined) ??
existingLlmProvider?.model_configurations?.[0]?.name ??
modelConfigurations?.[0]?.name ??
"";
const modelMap = new Map<string, ModelConfiguration>();
wellKnownModels.forEach((m) => modelMap.set(m.name, m));
existingModels.forEach((m) => modelMap.set(m.name, m));
return Array.from(modelMap.values());
}
/** Shared initial values for all LLM provider forms (both onboarding and admin). */
export function useInitialValues(
isOnboarding: boolean,
providerName: LLMProviderName,
existingLlmProvider?: LLMProviderView
) {
const { wellKnownLLMProvider } = useWellKnownLLMProvider(providerName);
const modelConfigurations = buildModelConfigurations(
existingLlmProvider,
wellKnownLLMProvider ?? undefined
);
const testModelName =
modelConfigurations.find((m) => m.is_visible)?.name ??
wellKnownLLMProvider?.recommended_default_model?.name;
// Auto mode must be explicitly enabled by the user
// Default to false for new providers, preserve existing value when editing
const isAutoMode = existingLlmProvider?.is_auto_mode ?? false;
return {
provider: existingLlmProvider?.provider ?? providerName,
name: isOnboarding ? providerName : existingLlmProvider?.name ?? "",
api_key: existingLlmProvider?.api_key ?? undefined,
api_base: existingLlmProvider?.api_base ?? undefined,
name: existingLlmProvider?.name || "",
default_model_name: defaultModelName,
is_public: existingLlmProvider?.is_public ?? true,
is_auto_mode: existingLlmProvider?.is_auto_mode ?? true,
is_auto_mode: isAutoMode,
groups: existingLlmProvider?.groups ?? [],
personas: existingLlmProvider?.personas ?? [],
model_configurations: modelConfigurations,
test_model_name: testModelName,
selected_model_names: existingLlmProvider
? existingLlmProvider.model_configurations
.filter((modelConfiguration) => modelConfiguration.is_visible)
.map((modelConfiguration) => modelConfiguration.name)
: modelConfigurations
?.filter((modelConfiguration) => modelConfiguration.is_visible)
.map((modelConfiguration) => modelConfiguration.name) ?? [],
};
}
// ─── buildValidationSchema ────────────────────────────────────────────────
interface ValidationSchemaOptions {
apiKey?: boolean;
apiBase?: boolean;
extra?: Yup.ObjectShape;
}
/**
* Builds the validation schema for a modal.
*
* @param isOnboarding — controls the base schema:
* - `true`: minimal (only `test_model_name`).
* - `false`: full admin schema (display name, access, models, etc.).
* @param options.apiKey — require `api_key`.
* @param options.apiBase — require `api_base`.
* @param options.extra — arbitrary Yup fields for provider-specific validation.
*/
export function buildValidationSchema(
isOnboarding: boolean,
{ apiKey, apiBase, extra }: ValidationSchemaOptions = {}
) {
const providerFields: Yup.ObjectShape = {
...(apiKey && {
api_key: Yup.string().required("API Key is required"),
}),
...(apiBase && {
api_base: Yup.string().required("API Base URL is required"),
}),
...extra,
};
if (isOnboarding) {
return Yup.object().shape({
test_model_name: Yup.string().required("Model name is required"),
...providerFields,
});
}
};
export const buildDefaultValidationSchema = () => {
return Yup.object({
name: Yup.string().required("Display Name is required"),
default_model_name: Yup.string().required("Model name is required"),
is_public: Yup.boolean().required(),
is_auto_mode: Yup.boolean().required(),
groups: Yup.array().of(Yup.number()),
personas: Yup.array().of(Yup.number()),
test_model_name: Yup.string().required("Model name is required"),
...providerFields,
selected_model_names: Yup.array().of(Yup.string()),
});
}
};
// ─── Form value types ─────────────────────────────────────────────────────
/**
 * Merge a provider's saved model list with the well-known catalog,
 * deduplicated by model name. When both sides contain the same model, the
 * existing (user-configured) entry wins; models appearing in both keep the
 * catalog's position in the output.
 */
export const buildAvailableModelConfigurations = (
  existingLlmProvider?: LLMProviderView,
  wellKnownLLMProvider?: WellKnownLLMProviderDescriptor
): ModelConfiguration[] => {
  const mergedByName = new Map<string, ModelConfiguration>();

  // Catalog entries go in first so they establish ordering...
  for (const model of wellKnownLLMProvider?.known_models ?? []) {
    mergedByName.set(model.name, model);
  }

  // ...then existing entries overwrite any duplicates (they take precedence).
  for (const model of existingLlmProvider?.model_configurations ?? []) {
    mergedByName.set(model.name, model);
  }

  return [...mergedByName.values()];
};
// Base form values that all provider forms share
/** Base form values that all LLM provider forms share. */
export interface BaseLLMFormValues {
  /** User-facing display name of the provider configuration. */
  name: string;
  api_key?: string;
  api_base?: string;
  /** Model name used for the test request — automatically derived. */
  test_model_name?: string;
  /** Model name presented/saved as the provider's default — TODO confirm against backend schema. */
  default_model_name?: string;
  /** Whether the provider is visible to the whole org (vs restricted to groups/personas). */
  is_public: boolean;
  is_auto_mode: boolean;
  /** Group ids with access when not public. */
  groups: number[];
  /** Persona ids with access when not public. */
  personas: number[];
  /** The full model list with is_visible set directly by user interaction. */
  model_configurations: ModelConfiguration[];
  /** Names of models the user checked as visible in the form UI. */
  selected_model_names: string[];
  /** Provider-specific extra configuration key/value pairs. */
  custom_config?: Record<string, string>;
}
// ─── Misc ─────────────────────────────────────────────────────────────────
/**
 * Parameters for the LLM provider submit flow that refreshes SWR caches on
 * success. `T` is the concrete form-values shape for a specific provider form.
 */
export interface SubmitLLMProviderParams<
  T extends BaseLLMFormValues = BaseLLMFormValues,
> {
  /** Provider identifier sent to the backend (e.g. "openai"). */
  providerName: string;
  /** Current form values at submit time. */
  values: T;
  /** Form values at mount time — used to detect changes. */
  initialValues: T;
  /** Model list to submit with the provider. */
  modelConfigurations: ModelConfiguration[];
  /** Present when editing an existing provider; absent when creating a new one. */
  existingLlmProvider?: LLMProviderView;
  /** When true, the provider is set as the org-wide default after a successful save. */
  shouldMarkAsDefault?: boolean;
  /** When true, suppresses the success toast after save. */
  hideSuccess?: boolean;
  /** Toggles the "testing credentials" UI state. */
  setIsTesting: (testing: boolean) => void;
  /** SWR mutator used to refresh provider caches after a successful save. */
  mutate: ScopedMutator;
  /** Closes the hosting modal after a successful submit. */
  onClose: () => void;
  /** Toggles the form's submitting/spinner state. */
  setSubmitting: (submitting: boolean) => void;
}
/**
 * Normalize and prune a model list for submission.
 *
 * Each configuration is rewritten with `is_visible` derived from membership
 * in `visibleModels` and `max_input_tokens` coerced to `null` when absent.
 * Only models that are visible — or that match `defaultModelName`, which must
 * survive even when hidden — are kept.
 */
export const filterModelConfigurations = (
  currentModelConfigurations: ModelConfiguration[],
  visibleModels: string[],
  defaultModelName?: string
): ModelConfiguration[] => {
  // Hoist membership testing out of the loop: O(n + m) instead of the
  // accidental O(n * m) from calling Array.includes per model.
  const visibleSet = new Set(visibleModels);
  return currentModelConfigurations
    .map(
      (modelConfiguration): ModelConfiguration => ({
        name: modelConfiguration.name,
        is_visible: visibleSet.has(modelConfiguration.name),
        max_input_tokens: modelConfiguration.max_input_tokens ?? null,
        supports_image_input: modelConfiguration.supports_image_input,
        supports_reasoning: modelConfiguration.supports_reasoning,
        display_name: modelConfiguration.display_name,
      })
    )
    .filter(
      (modelConfiguration) =>
        modelConfiguration.name === defaultModelName ||
        modelConfiguration.is_visible
    );
};
/**
 * Build the model list submitted in auto mode.
 *
 * Auto mode includes EVERY model, so nothing is filtered: each entry is
 * copied with its existing visibility preserved and `max_input_tokens`
 * normalized to `null` when absent.
 */
export const getAutoModeModelConfigurations = (
  modelConfigurations: ModelConfiguration[]
): ModelConfiguration[] =>
  modelConfigurations.map((config): ModelConfiguration => {
    const { name, is_visible, supports_image_input, supports_reasoning, display_name } = config;
    return {
      name,
      is_visible,
      max_input_tokens: config.max_input_tokens ?? null,
      supports_image_input,
      supports_reasoning,
      display_name,
    };
  });
/**
 * Discriminated result of an LLM credential test request:
 * success, or failure carrying a user-facing error message.
 */
export type TestApiKeyResult =
  | { ok: true }
  | { ok: false; errorMessage: string };
/**
 * Map fetched model descriptors to select-menu options, using the model
 * name as both the label and the value.
 */
export const getModelOptions = (
  fetchedModelConfigurations: Array<{ name: string }>
) =>
  fetchedModelConfigurations.map(({ name }) => ({
    label: name,
    value: name,
  }));
/**
 * Fresh, empty form state for onboarding provider forms.
 *
 * Onboarding always creates a new provider, so every field starts blank,
 * `api_key_changed` is pre-set to true, and access defaults to public with
 * auto mode off.
 */
export const buildOnboardingInitialValues = () => {
  const blankState = {
    // Identity
    name: "",
    provider: "",
    // Credentials / endpoints
    api_key: "",
    api_base: "",
    api_version: "",
    // Models
    default_model_name: "",
    model_configurations: [] as ModelConfiguration[],
    custom_config: {} as Record<string, string>,
    api_key_changed: true,
    // Access control
    groups: [] as number[],
    is_public: true,
    is_auto_mode: false,
    personas: [] as number[],
    selected_model_names: [] as string[],
    // Azure-style deployment fields
    deployment_name: "",
    target_uri: "",
  };
  return blankState;
};
/** Parameters for the onboarding provider submit flow (always creates a new provider). */
export interface SubmitOnboardingProviderParams {
  /** Provider identifier used for analytics and onboarding state tracking. */
  providerName: string;
  /** Request body PUT to the admin LLM provider endpoint. */
  payload: Record<string, unknown>;
  /** Current onboarding state — read to decide whether this is the first provider. */
  onboardingState: OnboardingState;
  /** Onboarding state mutators — used to record the new provider and enable the continue button. */
  onboardingActions: OnboardingActions;
  /** Routes the credential test through the custom-provider path and "custom" analytics. */
  isCustomProvider: boolean;
  /** Closes the hosting modal after a successful submit. */
  onClose: () => void;
  /** Toggles the form's submitting/spinner state. */
  setIsSubmitting: (submitting: boolean) => void;
}

View File

@@ -12,7 +12,7 @@ import {
SvgServer,
SvgSettings,
} from "@opal/icons";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
export interface LLMProviderCardProps {
title: string;
@@ -75,7 +75,7 @@ function LLMProviderCardInner({
<div className="flex gap-1 p-1 flex-1 min-w-0">
<div className="flex items-start h-full pt-0.5">
{providerName ? (
<ModelIcon provider={providerName} size={16} className="" />
<ProviderIcon provider={providerName} size={16} className="" />
) : (
<SvgServer className="w-4 h-4 stroke-text-04" />
)}

View File

@@ -2,7 +2,6 @@ import React from "react";
import {
WellKnownLLMProviderDescriptor,
LLMProviderName,
LLMProviderFormProps,
} from "@/interfaces/llm";
import { OnboardingActions, OnboardingState } from "@/interfaces/onboarding";
import OpenAIModal from "@/sections/modals/llmConfig/OpenAIModal";
@@ -13,7 +12,7 @@ import BedrockModal from "@/sections/modals/llmConfig/BedrockModal";
import VertexAIModal from "@/sections/modals/llmConfig/VertexAIModal";
import OpenRouterModal from "@/sections/modals/llmConfig/OpenRouterModal";
import CustomModal from "@/sections/modals/llmConfig/CustomModal";
import LMStudioModal from "@/sections/modals/llmConfig/LMStudioModal";
import LMStudioForm from "@/sections/modals/llmConfig/LMStudioForm";
import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
@@ -82,25 +81,11 @@ export function getOnboardingForm({
onboardingActions,
onOpenChange,
}: OnboardingFormProps): React.ReactNode {
const providerName = isCustomProvider
? "custom"
: llmDescriptor?.name ?? "custom";
const sharedProps: LLMProviderFormProps = {
const sharedProps = {
variant: "onboarding" as const,
shouldMarkAsDefault:
(onboardingState?.data.llmProviders ?? []).length === 0,
onboardingState,
onboardingActions,
onOpenChange,
onSuccess: () => {
onboardingActions.updateData({
llmProviders: [
...(onboardingState?.data.llmProviders ?? []),
providerName,
],
});
onboardingActions.setButtonActive(true);
},
};
// Handle custom provider
@@ -108,36 +93,41 @@ export function getOnboardingForm({
return <CustomModal {...sharedProps} />;
}
const providerProps = {
...sharedProps,
llmDescriptor,
};
switch (llmDescriptor.name) {
case LLMProviderName.OPENAI:
return <OpenAIModal {...sharedProps} />;
return <OpenAIModal {...providerProps} />;
case LLMProviderName.ANTHROPIC:
return <AnthropicModal {...sharedProps} />;
return <AnthropicModal {...providerProps} />;
case LLMProviderName.OLLAMA_CHAT:
return <OllamaModal {...sharedProps} />;
return <OllamaModal {...providerProps} />;
case LLMProviderName.AZURE:
return <AzureModal {...sharedProps} />;
return <AzureModal {...providerProps} />;
case LLMProviderName.BEDROCK:
return <BedrockModal {...sharedProps} />;
return <BedrockModal {...providerProps} />;
case LLMProviderName.VERTEX_AI:
return <VertexAIModal {...sharedProps} />;
return <VertexAIModal {...providerProps} />;
case LLMProviderName.OPENROUTER:
return <OpenRouterModal {...sharedProps} />;
return <OpenRouterModal {...providerProps} />;
case LLMProviderName.LM_STUDIO:
return <LMStudioModal {...sharedProps} />;
return <LMStudioForm {...providerProps} />;
case LLMProviderName.LITELLM_PROXY:
return <LiteLLMProxyModal {...sharedProps} />;
return <LiteLLMProxyModal {...providerProps} />;
case LLMProviderName.OPENAI_COMPATIBLE:
return <OpenAICompatibleModal {...sharedProps} />;
return <OpenAICompatibleModal {...providerProps} />;
default:
return <CustomModal {...sharedProps} />;

View File

@@ -16,7 +16,7 @@ import {
getProviderDisplayInfo,
} from "../forms/getOnboardingForm";
import { Disabled } from "@opal/core";
import ModelIcon from "@/app/admin/configuration/llm/ModelIcon";
import { ProviderIcon } from "@/app/admin/configuration/llm/ProviderIcon";
import { SvgCheckCircle, SvgCpu, SvgExternalLink } from "@opal/icons";
import { ContentAction } from "@opal/layouts";
import { useLLMProviderOptions } from "@/lib/hooks/useLLMProviderOptions";
@@ -69,7 +69,7 @@ const StackedProviderIcons = ({ providers }: StackedProviderIconsProps) => {
zIndex: providers.length - index,
}}
>
<ModelIcon provider={provider} size={16} />
<ProviderIcon provider={provider} size={16} />
</div>
))}
{providers.length > 3 && (

View File

@@ -16,182 +16,25 @@ import { useSidebarFolded } from "@/layouts/sidebar-layouts";
import { useIsKGExposed } from "@/app/admin/kg/utils";
import { useCustomAnalyticsEnabled } from "@/lib/hooks/useCustomAnalyticsEnabled";
import { useUser } from "@/providers/UserProvider";
import { UserRole } from "@/lib/types";
import { usePaidEnterpriseFeaturesEnabled } from "@/components/settings/usePaidEnterpriseFeaturesEnabled";
import { CombinedSettings } from "@/interfaces/settings";
import { SidebarTab } from "@opal/components";
import InputTypeIn from "@/refresh-components/inputs/InputTypeIn";
import Separator from "@/refresh-components/Separator";
import Spacer from "@/refresh-components/Spacer";
import { SvgArrowUpCircle, SvgSearch, SvgX } from "@opal/icons";
import { SvgSearch, SvgX } from "@opal/icons";
import {
useBillingInformation,
useLicense,
hasActiveSubscription,
} from "@/lib/billing";
import { ADMIN_ROUTES, sidebarItem } from "@/lib/admin-routes";
import useFilter from "@/hooks/useFilter";
import { IconFunctionComponent } from "@opal/types";
import AccountPopover from "@/sections/sidebar/AccountPopover";
const SECTIONS = {
UNLABELED: "",
AGENTS_AND_ACTIONS: "Agents & Actions",
DOCUMENTS_AND_KNOWLEDGE: "Documents & Knowledge",
INTEGRATIONS: "Integrations",
PERMISSIONS: "Permissions",
ORGANIZATION: "Organization",
USAGE: "Usage",
} as const;
interface SidebarItemEntry {
section: string;
name: string;
icon: IconFunctionComponent;
link: string;
error?: boolean;
disabled?: boolean;
}
function buildItems(
isCurator: boolean,
enableCloud: boolean,
enableEnterprise: boolean,
settings: CombinedSettings | null,
kgExposed: boolean,
customAnalyticsEnabled: boolean,
hasSubscription: boolean,
hooksEnabled: boolean
): SidebarItemEntry[] {
const vectorDbEnabled = settings?.settings.vector_db_enabled !== false;
const items: SidebarItemEntry[] = [];
const add = (section: string, route: Parameters<typeof sidebarItem>[0]) => {
items.push({ ...sidebarItem(route), section });
};
const addDisabled = (
section: string,
route: Parameters<typeof sidebarItem>[0],
isDisabled: boolean
) => {
items.push({ ...sidebarItem(route), section, disabled: isDisabled });
};
// 1. No header — core configuration (admin only)
if (!isCurator) {
add(SECTIONS.UNLABELED, ADMIN_ROUTES.LLM_MODELS);
add(SECTIONS.UNLABELED, ADMIN_ROUTES.WEB_SEARCH);
add(SECTIONS.UNLABELED, ADMIN_ROUTES.IMAGE_GENERATION);
add(SECTIONS.UNLABELED, ADMIN_ROUTES.VOICE);
add(SECTIONS.UNLABELED, ADMIN_ROUTES.CODE_INTERPRETER);
add(SECTIONS.UNLABELED, ADMIN_ROUTES.CHAT_PREFERENCES);
if (vectorDbEnabled && kgExposed) {
add(SECTIONS.UNLABELED, ADMIN_ROUTES.KNOWLEDGE_GRAPH);
}
if (!enableCloud && customAnalyticsEnabled) {
addDisabled(
SECTIONS.UNLABELED,
ADMIN_ROUTES.CUSTOM_ANALYTICS,
!enableEnterprise
);
}
}
// 2. Agents & Actions
add(SECTIONS.AGENTS_AND_ACTIONS, ADMIN_ROUTES.AGENTS);
add(SECTIONS.AGENTS_AND_ACTIONS, ADMIN_ROUTES.MCP_ACTIONS);
add(SECTIONS.AGENTS_AND_ACTIONS, ADMIN_ROUTES.OPENAPI_ACTIONS);
// 3. Documents & Knowledge
if (vectorDbEnabled) {
add(SECTIONS.DOCUMENTS_AND_KNOWLEDGE, ADMIN_ROUTES.INDEXING_STATUS);
add(SECTIONS.DOCUMENTS_AND_KNOWLEDGE, ADMIN_ROUTES.ADD_CONNECTOR);
add(SECTIONS.DOCUMENTS_AND_KNOWLEDGE, ADMIN_ROUTES.DOCUMENT_SETS);
if (!isCurator && !enableCloud) {
items.push({
...sidebarItem(ADMIN_ROUTES.INDEX_SETTINGS),
section: SECTIONS.DOCUMENTS_AND_KNOWLEDGE,
error: settings?.settings.needs_reindexing,
});
}
if (!isCurator && settings?.settings.opensearch_indexing_enabled) {
add(SECTIONS.DOCUMENTS_AND_KNOWLEDGE, ADMIN_ROUTES.INDEX_MIGRATION);
}
}
// 4. Integrations (admin only)
if (!isCurator) {
add(SECTIONS.INTEGRATIONS, ADMIN_ROUTES.API_KEYS);
add(SECTIONS.INTEGRATIONS, ADMIN_ROUTES.SLACK_BOTS);
add(SECTIONS.INTEGRATIONS, ADMIN_ROUTES.DISCORD_BOTS);
if (hooksEnabled) {
add(SECTIONS.INTEGRATIONS, ADMIN_ROUTES.HOOKS);
}
}
// 5. Permissions
if (!isCurator) {
add(SECTIONS.PERMISSIONS, ADMIN_ROUTES.USERS);
addDisabled(SECTIONS.PERMISSIONS, ADMIN_ROUTES.GROUPS, !enableEnterprise);
addDisabled(SECTIONS.PERMISSIONS, ADMIN_ROUTES.SCIM, !enableEnterprise);
} else if (enableEnterprise) {
add(SECTIONS.PERMISSIONS, ADMIN_ROUTES.GROUPS);
}
// 6. Organization (admin only)
if (!isCurator) {
if (hasSubscription) {
add(SECTIONS.ORGANIZATION, ADMIN_ROUTES.BILLING);
}
addDisabled(
SECTIONS.ORGANIZATION,
ADMIN_ROUTES.TOKEN_RATE_LIMITS,
!enableEnterprise
);
addDisabled(SECTIONS.ORGANIZATION, ADMIN_ROUTES.THEME, !enableEnterprise);
}
// 7. Usage (admin only)
if (!isCurator) {
addDisabled(SECTIONS.USAGE, ADMIN_ROUTES.USAGE, !enableEnterprise);
if (settings?.settings.query_history_type !== "disabled") {
addDisabled(
SECTIONS.USAGE,
ADMIN_ROUTES.QUERY_HISTORY,
!enableEnterprise
);
}
}
// 8. Upgrade Plan (admin only, no subscription)
if (!isCurator && !hasSubscription) {
items.push({
section: SECTIONS.UNLABELED,
name: "Upgrade Plan",
icon: SvgArrowUpCircle,
link: ADMIN_ROUTES.BILLING.path,
});
}
return items;
}
/** Preserve section ordering while grouping consecutive items by section. */
function groupBySection(items: SidebarItemEntry[]) {
const groups: { section: string; items: SidebarItemEntry[] }[] = [];
for (const item of items) {
const last = groups[groups.length - 1];
if (last && last.section === item.section) {
last.items.push(item);
} else {
groups.push({ section: item.section, items: [item] });
}
}
return groups;
}
import {
buildItems,
groupBySection,
type FeatureFlags,
type SidebarItemEntry,
} from "@/lib/admin-sidebar-utils";
interface AdminSidebarProps {
enableCloudSS: boolean;
@@ -221,14 +64,12 @@ function AdminSidebarInner({
const { kgExposed } = useIsKGExposed();
const pathname = usePathname();
const { customAnalyticsEnabled } = useCustomAnalyticsEnabled();
const { user } = useUser();
const { permissions } = useUser();
const settings = useSettingsContext();
const enableEnterprise = usePaidEnterpriseFeaturesEnabled();
const { data: billingData, isLoading: billingLoading } =
useBillingInformation();
const { data: licenseData, isLoading: licenseLoading } = useLicense();
const isCurator =
user?.role === UserRole.CURATOR || user?.role === UserRole.GLOBAL_CURATOR;
// Default to true while loading to avoid flashing "Upgrade Plan"
const hasSubscriptionOrLicense =
billingLoading || licenseLoading
@@ -237,19 +78,21 @@ function AdminSidebarInner({
(billingData && hasActiveSubscription(billingData)) ||
licenseData?.has_license
);
const hooksEnabled =
enableEnterprise && (settings?.settings.hooks_enabled ?? false);
const allItems = buildItems(
isCurator,
enableCloudSS,
enableEnterprise,
settings,
const flags: FeatureFlags = {
vectorDbEnabled: settings?.settings.vector_db_enabled !== false,
kgExposed,
enableCloud: enableCloudSS,
enableEnterprise,
customAnalyticsEnabled,
hasSubscriptionOrLicense,
hooksEnabled
);
hasSubscription: hasSubscriptionOrLicense,
hooksEnabled:
enableEnterprise && (settings?.settings.hooks_enabled ?? false),
opensearchEnabled: settings?.settings.opensearch_indexing_enabled ?? false,
queryHistoryEnabled: settings?.settings.query_history_type !== "disabled",
};
const allItems = buildItems(permissions, flags, settings);
const itemExtractor = useCallback((item: SidebarItemEntry) => item.name, []);

View File

@@ -55,6 +55,7 @@ import { showErrorNotification, handleMoveOperation } from "./sidebarUtils";
import { SidebarTab } from "@opal/components";
import { ChatSession } from "@/app/app/interfaces";
import { useUser } from "@/providers/UserProvider";
import { getFirstPermittedAdminRoute } from "@/lib/permissions";
import useAppFocus from "@/hooks/useAppFocus";
import { useCreateModal } from "@/refresh-components/contexts/ModalContext";
import { useModalContext } from "@/components/context/ModalContext";
@@ -479,7 +480,7 @@ const MemoizedAppSidebarInner = memo(function AppSidebarInner() {
]
);
const { isAdmin, isCurator, user } = useUser();
const { isAdmin, isCurator, hasAdminAccess, permissions, user } = useUser();
const activeSidebarTab = useAppFocus();
const createProjectModal = useCreateModal();
const defaultAppMode =
@@ -584,13 +585,13 @@ const MemoizedAppSidebarInner = memo(function AppSidebarInner() {
const settingsButton = useMemo(
() => (
<div>
{(isAdmin || isCurator) && (
{hasAdminAccess && (
<SidebarTab
href={isCurator ? "/admin/agents" : "/admin/configuration/llm"}
href={getFirstPermittedAdminRoute(permissions)}
icon={SvgSettings}
folded={folded}
>
{isAdmin ? "Admin Panel" : "Curator Panel"}
Admin Panel
</SidebarTab>
)}
<AccountPopover
@@ -601,7 +602,13 @@ const MemoizedAppSidebarInner = memo(function AppSidebarInner() {
/>
</div>
),
[folded, isAdmin, isCurator, handleShowBuildIntro, isOnyxCraftEnabled]
[
folded,
hasAdminAccess,
permissions,
handleShowBuildIntro,
isOnyxCraftEnabled,
]
);
return (