Compare commits

...

9 Commits

Author SHA1 Message Date
github-actions[bot]
c348d1855d feat: generic OpenAI Compatible LLM Provider setup (#9968) to release v3.1 (#9975)
Co-authored-by: Jamison Lahman <jamison@lahman.dev>
2026-04-07 13:20:41 -07:00
github-actions[bot]
b4579a1365 fix(indexing, powerpoint files): Patch markitdown _convert_chart_to_markdown to no-op (#9970) to release v3.1 (#9979)
Co-authored-by: acaprau <48705707+acaprau@users.noreply.github.com>
2026-04-07 13:02:34 -07:00
Justin Tahara
893c094aed fix(groups): Global Curator Permissions (#9974) 2026-04-07 13:01:38 -07:00
Wenxi
f8a55712d2 fix: set correct ee mode for mcp server (#9933) 2026-04-07 09:13:23 -07:00
github-actions[bot]
591afd4fb1 fix: stop falsely rejecting owner-password-only PDFs as protected (#9953) to release v3.1 (#9962)
Co-authored-by: Jamison Lahman <jamison@lahman.dev>
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-06 21:23:13 -07:00
github-actions[bot]
9328070dc0 fix(federated): prevent masked credentials from corrupting stored secrets (#9868) to release v3.1 (#9928) 2026-04-05 16:18:14 -07:00
Jamison Lahman
6163521126 Revert "chore(deps): bump litellm from 1.81.6 to 1.83.0 (#9898) to release v3.1" (#9910) 2026-04-03 18:32:10 -07:00
Jamison Lahman
d42c5616b0 chore(deps): bump litellm from 1.81.6 to 1.83.0 (#9898) to release v3.1 (#9902)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-04-03 16:18:19 -07:00
Justin Tahara
aeb4fdd6c1 fix(db): remove unnecessary selectinload(User.memories) from auth paths (#9838) 2026-04-01 17:16:57 -07:00
34 changed files with 1052 additions and 112 deletions

View File

@@ -12,6 +12,11 @@ SLACK_USER_TOKEN_PREFIX = "xoxp-"
SLACK_BOT_TOKEN_PREFIX = "xoxb-"
ONYX_EMAILABLE_LOGO_MAX_DIM = 512
# The mask_string() function in encryption.py uses "•" (U+2022 BULLET) to mask secrets.
MASK_CREDENTIAL_CHAR = "\u2022"
# Pattern produced by mask_string for strings >= 14 chars: "abcd...wxyz" (exactly 11 chars)
MASK_CREDENTIAL_LONG_RE = re.compile(r"^.{4}\.{3}.{4}$")
SOURCE_TYPE = "source_type"
# stored in the `metadata` of a chunk. Used to signify that this chunk should
# not be used for QA. For example, Google Drive file types which can't be parsed

View File

@@ -4,7 +4,6 @@ from fastapi_users.password import PasswordHelper
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from onyx.auth.api_key import ApiKeyDescriptor
@@ -55,7 +54,6 @@ async def fetch_user_for_api_key(
select(User)
.join(ApiKey, ApiKey.user_id == User.id)
.where(ApiKey.hashed_api_key == hashed_api_key)
.options(selectinload(User.memories))
)

View File

@@ -13,7 +13,6 @@ from sqlalchemy import func
from sqlalchemy import Select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from onyx.auth.schemas import UserRole
@@ -98,11 +97,6 @@ async def get_user_count(only_admin_users: bool = False) -> int:
# Need to override this because FastAPI Users doesn't give flexibility for backend field creation logic in OAuth flow
class SQLAlchemyUserAdminDB(SQLAlchemyUserDatabase[UP, ID]):
async def _get_user(self, statement: Select) -> UP | None:
statement = statement.options(selectinload(User.memories))
results = await self.session.execute(statement)
return results.unique().scalar_one_or_none()
async def create(
self,
create_dict: Dict[str, Any],

View File

@@ -8,6 +8,8 @@ from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from onyx.configs.constants import FederatedConnectorSource
from onyx.configs.constants import MASK_CREDENTIAL_CHAR
from onyx.configs.constants import MASK_CREDENTIAL_LONG_RE
from onyx.db.engine.sql_engine import get_session_with_current_tenant
from onyx.db.models import DocumentSet
from onyx.db.models import FederatedConnector
@@ -45,6 +47,23 @@ def fetch_all_federated_connectors_parallel() -> list[FederatedConnector]:
return fetch_all_federated_connectors(db_session)
def _reject_masked_credentials(credentials: dict[str, Any]) -> None:
    """Reject credential dicts that contain mask_string() placeholder output.

    mask_string() emits one of two shapes:
      - short inputs (< 14 chars):  a run of "•" (U+2022 BULLET) characters
      - long inputs (>= 14 chars):  "abcd...wxyz" (first 4 + "..." + last 4)

    Raises:
        ValueError: if any string value matches either masked shape, naming
            the offending field so the caller can surface a useful error.
    """
    for field_name, field_value in credentials.items():
        # Only string values can carry mask output; ints/bools/None pass through.
        if not isinstance(field_value, str):
            continue
        looks_masked = (
            MASK_CREDENTIAL_CHAR in field_value
            or MASK_CREDENTIAL_LONG_RE.match(field_value) is not None
        )
        if looks_masked:
            raise ValueError(
                f"Credential field '{field_name}' contains masked placeholder characters. Please provide the actual credential value."
            )
def validate_federated_connector_credentials(
source: FederatedConnectorSource,
credentials: dict[str, Any],
@@ -66,6 +85,8 @@ def create_federated_connector(
config: dict[str, Any] | None = None,
) -> FederatedConnector:
"""Create a new federated connector with credential and config validation."""
_reject_masked_credentials(credentials)
# Validate credentials before creating
if not validate_federated_connector_credentials(source, credentials):
raise ValueError(
@@ -277,6 +298,8 @@ def update_federated_connector(
)
if credentials is not None:
_reject_masked_credentials(credentials)
# Validate credentials before updating
if not validate_federated_connector_credentials(
federated_connector.source, credentials

View File

@@ -8,7 +8,6 @@ from uuid import UUID
from sqlalchemy import select
from sqlalchemy import update
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from onyx.auth.pat import build_displayable_pat
@@ -47,7 +46,6 @@ async def fetch_user_for_pat(
(PersonalAccessToken.expires_at.is_(None))
| (PersonalAccessToken.expires_at > now)
)
.options(selectinload(User.memories))
)
if not user:
return None

View File

@@ -229,7 +229,9 @@ def get_memories_for_user(
user_id: UUID,
db_session: Session,
) -> Sequence[Memory]:
return db_session.scalars(select(Memory).where(Memory.user_id == user_id)).all()
return db_session.scalars(
select(Memory).where(Memory.user_id == user_id).order_by(Memory.id.desc())
).all()
def update_user_pinned_assistants(

View File

@@ -49,9 +49,21 @@ KNOWN_OPENPYXL_BUGS = [
def get_markitdown_converter() -> "MarkItDown":
global _MARKITDOWN_CONVERTER
from markitdown import MarkItDown
if _MARKITDOWN_CONVERTER is None:
from markitdown import MarkItDown
# Patch this function to effectively no-op because we were seeing this
# module take an inordinate amount of time to convert charts to markdown,
# making some powerpoint files with many or complicated charts nearly
# unindexable.
from markitdown.converters._pptx_converter import PptxConverter
setattr(
PptxConverter,
"_convert_chart_to_markdown",
lambda self, chart: "\n\n[chart omitted]\n\n", # noqa: ARG005
)
_MARKITDOWN_CONVERTER = MarkItDown(enable_plugins=False)
return _MARKITDOWN_CONVERTER
@@ -202,18 +214,26 @@ def read_pdf_file(
try:
pdf_reader = PdfReader(file)
if pdf_reader.is_encrypted and pdf_pass is not None:
if pdf_reader.is_encrypted:
# Try the explicit password first, then fall back to an empty
# string. Owner-password-only PDFs (permission restrictions but
# no open password) decrypt successfully with "".
# See https://github.com/onyx-dot-app/onyx/issues/9754
passwords = [p for p in [pdf_pass, ""] if p is not None]
decrypt_success = False
try:
decrypt_success = pdf_reader.decrypt(pdf_pass) != 0
except Exception:
logger.error("Unable to decrypt pdf")
for pw in passwords:
try:
if pdf_reader.decrypt(pw) != 0:
decrypt_success = True
break
except Exception:
pass
if not decrypt_success:
logger.error(
"Encrypted PDF could not be decrypted, returning empty text."
)
return "", metadata, []
elif pdf_reader.is_encrypted:
logger.warning("No Password for an encrypted PDF, returning empty text.")
return "", metadata, []
# Basic PDF metadata
if pdf_reader.metadata is not None:

View File

@@ -33,8 +33,20 @@ def is_pdf_protected(file: IO[Any]) -> bool:
with preserve_position(file):
reader = PdfReader(file)
if not reader.is_encrypted:
return False
return bool(reader.is_encrypted)
# PDFs with only an owner password (permission restrictions like
# print/copy disabled) use an empty user password — any viewer can open
# them without prompting. decrypt("") returns 0 only when a real user
# password is required. See https://github.com/onyx-dot-app/onyx/issues/9754
try:
return reader.decrypt("") == 0
except Exception:
logger.exception(
"Failed to evaluate PDF encryption; treating as password protected"
)
return True
def is_docx_protected(file: IO[Any]) -> bool:

View File

@@ -26,6 +26,7 @@ class LlmProviderNames(str, Enum):
MISTRAL = "mistral"
LITELLM_PROXY = "litellm_proxy"
BIFROST = "bifrost"
OPENAI_COMPATIBLE = "openai_compatible"
def __str__(self) -> str:
"""Needed so things like:
@@ -46,6 +47,7 @@ WELL_KNOWN_PROVIDER_NAMES = [
LlmProviderNames.LM_STUDIO,
LlmProviderNames.LITELLM_PROXY,
LlmProviderNames.BIFROST,
LlmProviderNames.OPENAI_COMPATIBLE,
]
@@ -64,6 +66,7 @@ PROVIDER_DISPLAY_NAMES: dict[str, str] = {
LlmProviderNames.LM_STUDIO: "LM Studio",
LlmProviderNames.LITELLM_PROXY: "LiteLLM Proxy",
LlmProviderNames.BIFROST: "Bifrost",
LlmProviderNames.OPENAI_COMPATIBLE: "OpenAI Compatible",
"groq": "Groq",
"anyscale": "Anyscale",
"deepseek": "DeepSeek",
@@ -116,6 +119,7 @@ AGGREGATOR_PROVIDERS: set[str] = {
LlmProviderNames.AZURE,
LlmProviderNames.LITELLM_PROXY,
LlmProviderNames.BIFROST,
LlmProviderNames.OPENAI_COMPATIBLE,
}
# Model family name mappings for display name generation

View File

@@ -305,12 +305,19 @@ class LitellmLLM(LLM):
):
model_kwargs[VERTEX_LOCATION_KWARG] = "global"
# Bifrost: OpenAI-compatible proxy that expects model names in
# provider/model format (e.g. "anthropic/claude-sonnet-4-6").
# We route through LiteLLM's openai provider with the Bifrost base URL,
# and ensure /v1 is appended.
if model_provider == LlmProviderNames.BIFROST:
# Bifrost and OpenAI-compatible: OpenAI-compatible proxies that send
# model names directly to the endpoint. We route through LiteLLM's
# openai provider with the server's base URL, and ensure /v1 is appended.
if model_provider in (
LlmProviderNames.BIFROST,
LlmProviderNames.OPENAI_COMPATIBLE,
):
self._custom_llm_provider = "openai"
# LiteLLM's OpenAI client requires an api_key to be set.
# Many OpenAI-compatible servers don't need auth, so supply a
# placeholder to prevent LiteLLM from raising AuthenticationError.
if not self._api_key:
model_kwargs.setdefault("api_key", "not-needed")
if self._api_base is not None:
base = self._api_base.rstrip("/")
self._api_base = base if base.endswith("/v1") else f"{base}/v1"
@@ -427,17 +434,20 @@ class LitellmLLM(LLM):
optional_kwargs: dict[str, Any] = {}
# Model name
is_bifrost = self._model_provider == LlmProviderNames.BIFROST
is_openai_compatible_proxy = self._model_provider in (
LlmProviderNames.BIFROST,
LlmProviderNames.OPENAI_COMPATIBLE,
)
model_provider = (
f"{self.config.model_provider}/responses"
if is_openai_model # Uses litellm's completions -> responses bridge
else self.config.model_provider
)
if is_bifrost:
# Bifrost expects model names in provider/model format
# (e.g. "anthropic/claude-sonnet-4-6") sent directly to its
# OpenAI-compatible endpoint. We use custom_llm_provider="openai"
# so LiteLLM doesn't try to route based on the provider prefix.
if is_openai_compatible_proxy:
# OpenAI-compatible proxies (Bifrost, generic OpenAI-compatible
# servers) expect model names sent directly to their endpoint.
# We use custom_llm_provider="openai" so LiteLLM doesn't try
# to route based on the provider prefix.
model = self.config.deployment_name or self.config.model_name
else:
model = f"{model_provider}/{self.config.deployment_name or self.config.model_name}"
@@ -528,7 +538,10 @@ class LitellmLLM(LLM):
if structured_response_format:
optional_kwargs["response_format"] = structured_response_format
if not (is_claude_model or is_ollama or is_mistral) or is_bifrost:
if (
not (is_claude_model or is_ollama or is_mistral)
or is_openai_compatible_proxy
):
# Litellm bug: tool_choice is dropped silently if not specified here for OpenAI
# However, this param breaks Anthropic and Mistral models,
# so it must be conditionally included unless the request is

View File

@@ -15,6 +15,8 @@ LITELLM_PROXY_PROVIDER_NAME = "litellm_proxy"
BIFROST_PROVIDER_NAME = "bifrost"
OPENAI_COMPATIBLE_PROVIDER_NAME = "openai_compatible"
# Providers that use optional Bearer auth from custom_config
PROVIDERS_WITH_SPECIAL_API_KEY_HANDLING: dict[str, str] = {
LlmProviderNames.OLLAMA_CHAT: OLLAMA_API_KEY_CONFIG_KEY,

View File

@@ -19,6 +19,7 @@ from onyx.llm.well_known_providers.constants import BIFROST_PROVIDER_NAME
from onyx.llm.well_known_providers.constants import LITELLM_PROXY_PROVIDER_NAME
from onyx.llm.well_known_providers.constants import LM_STUDIO_PROVIDER_NAME
from onyx.llm.well_known_providers.constants import OLLAMA_PROVIDER_NAME
from onyx.llm.well_known_providers.constants import OPENAI_COMPATIBLE_PROVIDER_NAME
from onyx.llm.well_known_providers.constants import OPENAI_PROVIDER_NAME
from onyx.llm.well_known_providers.constants import OPENROUTER_PROVIDER_NAME
from onyx.llm.well_known_providers.constants import VERTEXAI_PROVIDER_NAME
@@ -51,6 +52,7 @@ def _get_provider_to_models_map() -> dict[str, list[str]]:
OPENROUTER_PROVIDER_NAME: [], # Dynamic - fetched from OpenRouter API
LITELLM_PROXY_PROVIDER_NAME: [], # Dynamic - fetched from LiteLLM proxy API
BIFROST_PROVIDER_NAME: [], # Dynamic - fetched from Bifrost API
OPENAI_COMPATIBLE_PROVIDER_NAME: [], # Dynamic - fetched from OpenAI-compatible API
}
@@ -336,6 +338,7 @@ def get_provider_display_name(provider_name: str) -> str:
VERTEXAI_PROVIDER_NAME: "Google Vertex AI",
OPENROUTER_PROVIDER_NAME: "OpenRouter",
LITELLM_PROXY_PROVIDER_NAME: "LiteLLM Proxy",
OPENAI_COMPATIBLE_PROVIDER_NAME: "OpenAI Compatible",
}
if provider_name in _ONYX_PROVIDER_DISPLAY_NAMES:

View File

@@ -6,6 +6,7 @@ from onyx.configs.app_configs import MCP_SERVER_ENABLED
from onyx.configs.app_configs import MCP_SERVER_HOST
from onyx.configs.app_configs import MCP_SERVER_PORT
from onyx.utils.logger import setup_logger
from onyx.utils.variable_functionality import set_is_ee_based_on_env_variable
logger = setup_logger()
@@ -16,6 +17,7 @@ def main() -> None:
logger.info("MCP server is disabled (MCP_SERVER_ENABLED=false)")
return
set_is_ee_based_on_env_variable()
logger.info(f"Starting MCP server on {MCP_SERVER_HOST}:{MCP_SERVER_PORT}")
from onyx.mcp_server.api import mcp_app

View File

@@ -74,6 +74,8 @@ from onyx.server.manage.llm.models import ModelConfigurationUpsertRequest
from onyx.server.manage.llm.models import OllamaFinalModelResponse
from onyx.server.manage.llm.models import OllamaModelDetails
from onyx.server.manage.llm.models import OllamaModelsRequest
from onyx.server.manage.llm.models import OpenAICompatibleFinalModelResponse
from onyx.server.manage.llm.models import OpenAICompatibleModelsRequest
from onyx.server.manage.llm.models import OpenRouterFinalModelResponse
from onyx.server.manage.llm.models import OpenRouterModelDetails
from onyx.server.manage.llm.models import OpenRouterModelsRequest
@@ -1575,3 +1577,95 @@ def _get_bifrost_models_response(api_base: str, api_key: str | None = None) -> d
source_name="Bifrost",
api_key=api_key,
)
@admin_router.post("/openai-compatible/available-models")
def get_openai_compatible_server_available_models(
    request: OpenAICompatibleModelsRequest,
    _: User = Depends(current_admin_user),
    db_session: Session = Depends(get_session),
) -> list[OpenAICompatibleFinalModelResponse]:
    """Fetch available models from a generic OpenAI-compatible /v1/models endpoint.

    Parses the standard OpenAI list-models payload ({"data": [...]}), filters
    out embedding models, infers capability flags from model names, and
    optionally syncs the result into the DB for an existing provider.

    Raises:
        OnyxError: VALIDATION_ERROR when the endpoint returns no models or no
            entry survives filtering/parsing.
    """
    response_json = _get_openai_compatible_server_response(
        api_base=request.api_base, api_key=request.api_key
    )
    models = response_json.get("data", [])
    if not isinstance(models, list) or len(models) == 0:
        raise OnyxError(
            OnyxErrorCode.VALIDATION_ERROR,
            "No models found from your OpenAI-compatible endpoint",
        )
    results: list[OpenAICompatibleFinalModelResponse] = []
    for model in models:
        # Best-effort parse: one malformed entry must not fail the whole
        # request, so each entry is wrapped individually.
        try:
            model_id = model.get("id", "")
            # Some servers include a human-readable "name"; fall back to the id.
            model_name = model.get("name", model_id)
            if not model_id:
                continue
            # Skip embedding models
            if is_embedding_model(model_id):
                continue
            results.append(
                OpenAICompatibleFinalModelResponse(
                    name=model_id,
                    display_name=model_name,
                    # NOTE(review): "context_length" is not part of the core
                    # OpenAI /v1/models schema; .get() tolerates its absence
                    # (None) — confirm downstream handles None token limits.
                    max_input_tokens=model.get("context_length"),
                    supports_image_input=infer_vision_support(model_id),
                    supports_reasoning=is_reasoning_model(model_id, model_name),
                )
            )
        except Exception as e:
            logger.warning(
                "Failed to parse OpenAI-compatible model entry",
                extra={"error": str(e), "item": str(model)[:1000]},
            )
    if not results:
        raise OnyxError(
            OnyxErrorCode.VALIDATION_ERROR,
            "No compatible models found from OpenAI-compatible endpoint",
        )
    # Case-insensitive sort gives a stable, user-friendly ordering.
    sorted_results = sorted(results, key=lambda m: m.name.lower())
    # Sync new models to DB if provider_name is specified
    if request.provider_name:
        _sync_fetched_models(
            db_session=db_session,
            provider_name=request.provider_name,
            models=[
                SyncModelEntry(
                    name=r.name,
                    display_name=r.display_name,
                    max_input_tokens=r.max_input_tokens,
                    supports_image_input=r.supports_image_input,
                )
                for r in sorted_results
            ],
            source_label="OpenAI Compatible",
        )
    return sorted_results
def _get_openai_compatible_server_response(
    api_base: str, api_key: str | None = None
) -> dict:
    """Perform GET to an OpenAI-compatible /v1/models and return parsed JSON."""
    base = api_base.strip().rstrip("/")
    # Normalize so the final URL always ends in "/v1/models", whether or not
    # the caller already included the "/v1" suffix.
    if not base.endswith("/v1"):
        base = f"{base}/v1"
    return _get_openai_compatible_models_response(
        url=f"{base}/models",
        source_name="OpenAI Compatible",
        api_key=api_key,
    )

View File

@@ -464,3 +464,18 @@ class BifrostFinalModelResponse(BaseModel):
max_input_tokens: int | None
supports_image_input: bool
supports_reasoning: bool
# OpenAI Compatible dynamic models fetch
class OpenAICompatibleModelsRequest(BaseModel):
    """Request body for listing models from a generic OpenAI-compatible server."""

    # Base URL of the server; backend appends "/v1" when missing.
    api_base: str
    # Optional bearer token — many self-hosted servers require none.
    api_key: str | None = None
    provider_name: str | None = None  # Optional: to save models to existing provider
class OpenAICompatibleFinalModelResponse(BaseModel):
    """One model entry returned to the admin UI after filtering/inference."""

    name: str  # Model ID (e.g. "meta-llama/Llama-3-8B-Instruct")
    display_name: str  # Human-readable name from API
    # None when the server's /v1/models payload carries no context length.
    max_input_tokens: int | None
    supports_image_input: bool
    supports_reasoning: bool

View File

@@ -26,6 +26,7 @@ DYNAMIC_LLM_PROVIDERS = frozenset(
LlmProviderNames.OLLAMA_CHAT,
LlmProviderNames.LM_STUDIO,
LlmProviderNames.BIFROST,
LlmProviderNames.OPENAI_COMPATIBLE,
}
)

View File

@@ -147,6 +147,7 @@ class UserInfo(BaseModel):
is_anonymous_user: bool | None = None,
tenant_info: TenantInfo | None = None,
assistant_specific_configs: UserSpecificAssistantPreferences | None = None,
memories: list[MemoryItem] | None = None,
) -> "UserInfo":
return cls(
id=str(user.id),
@@ -191,10 +192,7 @@ class UserInfo(BaseModel):
role=user.personal_role or "",
use_memories=user.use_memories,
enable_memory_tool=user.enable_memory_tool,
memories=[
MemoryItem(id=memory.id, content=memory.memory_text)
for memory in (user.memories or [])
],
memories=memories or [],
user_preferences=user.user_preferences or "",
),
)

View File

@@ -57,6 +57,7 @@ from onyx.db.user_preferences import activate_user
from onyx.db.user_preferences import deactivate_user
from onyx.db.user_preferences import get_all_user_assistant_specific_configs
from onyx.db.user_preferences import get_latest_access_token_for_user
from onyx.db.user_preferences import get_memories_for_user
from onyx.db.user_preferences import update_assistant_preferences
from onyx.db.user_preferences import update_user_assistant_visibility
from onyx.db.user_preferences import update_user_auto_scroll
@@ -823,6 +824,11 @@ def verify_user_logged_in(
[],
),
)
memories = [
MemoryItem(id=memory.id, content=memory.memory_text)
for memory in get_memories_for_user(user.id, db_session)
]
user_info = UserInfo.from_model(
user,
current_token_created_at=token_created_at,
@@ -833,6 +839,7 @@ def verify_user_logged_in(
new_tenant=new_tenant,
invitation=tenant_invitation,
),
memories=memories,
)
return user_info
@@ -930,7 +937,8 @@ def update_user_personalization_api(
else user.enable_memory_tool
)
existing_memories = [
MemoryItem(id=memory.id, content=memory.memory_text) for memory in user.memories
MemoryItem(id=memory.id, content=memory.memory_text)
for memory in get_memories_for_user(user.id, db_session)
]
new_memories = (
request.memories if request.memories is not None else existing_memories

View File

@@ -0,0 +1,58 @@
import pytest
from onyx.configs.constants import MASK_CREDENTIAL_CHAR
from onyx.db.federated import _reject_masked_credentials
class TestRejectMaskedCredentials:
    """Verify that masked credential values are never accepted for DB writes.
    mask_string() has two output formats:
    - Short strings (< 14 chars): "••••••••••••" (U+2022 BULLET)
    - Long strings (>= 14 chars): "abcd...wxyz" (first4 + "..." + last4)
    _reject_masked_credentials must catch both.
    """

    def test_rejects_fully_masked_value(self) -> None:
        # Short-string format: a run of BULLET characters.
        masked = MASK_CREDENTIAL_CHAR * 12  # "••••••••••••"
        with pytest.raises(ValueError, match="masked placeholder"):
            _reject_masked_credentials({"client_id": masked})

    def test_rejects_long_string_masked_value(self) -> None:
        """mask_string returns 'first4...last4' for long strings — the real
        format used for OAuth credentials like client_id and client_secret."""
        with pytest.raises(ValueError, match="masked placeholder"):
            _reject_masked_credentials({"client_id": "1234...7890"})

    def test_rejects_when_any_field_is_masked(self) -> None:
        """Even if client_id is real, a masked client_secret must be caught."""
        # The error message names the offending field, hence match="client_secret".
        with pytest.raises(ValueError, match="client_secret"):
            _reject_masked_credentials(
                {
                    "client_id": "1234567890.1234567890",
                    "client_secret": MASK_CREDENTIAL_CHAR * 12,
                }
            )

    def test_accepts_real_credentials(self) -> None:
        # Should not raise
        _reject_masked_credentials(
            {
                "client_id": "1234567890.1234567890",
                "client_secret": "test_client_secret_value",
            }
        )

    def test_accepts_empty_dict(self) -> None:
        # Should not raise — empty credentials are handled elsewhere
        _reject_masked_credentials({})

    def test_ignores_non_string_values(self) -> None:
        # Non-string values (None, bool, int) should pass through
        _reject_masked_credentials(
            {
                "client_id": "real_value",
                "redirect_uri": None,
                "some_flag": True,
            }
        )

View File

@@ -0,0 +1,76 @@
%PDF-1.3
%<25><><EFBFBD><EFBFBD>
1 0 obj
<<
/Producer <1083d595b1>
>>
endobj
2 0 obj
<<
/Type /Pages
/Count 1
/Kids [ 4 0 R ]
>>
endobj
3 0 obj
<<
/Type /Catalog
/Pages 2 0 R
>>
endobj
4 0 obj
<<
/Type /Page
/Resources <<
/Font <<
/F1 <<
/Type /Font
/Subtype /Type1
/BaseFont /Helvetica
>>
>>
>>
/MediaBox [ 0.0 0.0 200 200 ]
/Contents 5 0 R
/Parent 2 0 R
>>
endobj
5 0 obj
<<
/Length 42
>>
stream
,N<><6~<7E>)<29><><EFBFBD><EFBFBD><EFBFBD>u<EFBFBD> <0C><><EFBFBD>Zc'<27><>>8g<38><67><EFBFBD>n<EFBFBD><6E><EFBFBD><EFBFBD><EFBFBD>9"
endstream
endobj
6 0 obj
<<
/V 2
/R 3
/Length 128
/P 4294967292
/Filter /Standard
/O <6a340a292629053da84a6d8b19a5d505953b8b3fdac3d2d389fde0e354528d44>
/U <d6f0dc91c7b9de264a8d708515468e6528bf4e5e4e758a4164004e56fffa0108>
>>
endobj
xref
0 7
0000000000 65535 f
0000000015 00000 n
0000000059 00000 n
0000000118 00000 n
0000000167 00000 n
0000000348 00000 n
0000000440 00000 n
trailer
<<
/Size 7
/Root 3 0 R
/Info 1 0 R
/ID [ <6364336635356135633239323638353039306635656133623165313637366430> <6364336635356135633239323638353039306635656133623165313637366430> ]
/Encrypt 6 0 R
>>
startxref
655
%%EOF

View File

@@ -54,6 +54,12 @@ class TestReadPdfFile:
text, _, _ = read_pdf_file(_load("encrypted.pdf"), pdf_pass="wrong")
assert text == ""
def test_owner_password_only_pdf_extracts_text(self) -> None:
    """A PDF encrypted with only an owner password (no user password)
    should still yield its text content. Regression for #9754."""
    # No pdf_pass supplied: decryption must succeed via the empty-string fallback.
    text, _, _ = read_pdf_file(_load("owner_protected.pdf"))
    assert "Hello World" in text
def test_empty_pdf(self) -> None:
text, _, _ = read_pdf_file(_load("empty.pdf"))
assert text.strip() == ""
@@ -117,6 +123,12 @@ class TestIsPdfProtected:
def test_protected_pdf(self) -> None:
assert is_pdf_protected(_load("encrypted.pdf")) is True
def test_owner_password_only_is_not_protected(self) -> None:
    """A PDF with only an owner password (permission restrictions) but no
    user password should NOT be considered protected — any viewer can open
    it without prompting for a password."""
    assert is_pdf_protected(_load("owner_protected.pdf")) is False
def test_preserves_file_position(self) -> None:
pdf = _load("simple.pdf")
pdf.seek(42)

View File

@@ -0,0 +1,79 @@
import io
from pptx import Presentation # type: ignore[import-untyped]
from pptx.chart.data import CategoryChartData # type: ignore[import-untyped]
from pptx.enum.chart import XL_CHART_TYPE # type: ignore[import-untyped]
from pptx.util import Inches # type: ignore[import-untyped]
from onyx.file_processing.extract_file_text import pptx_to_text
def _make_pptx_with_chart() -> io.BytesIO:
    """Create an in-memory pptx with one text slide and one chart slide.

    Returns:
        A BytesIO positioned at 0, ready to be passed to pptx_to_text.
    """
    prs = Presentation()
    # Slide 1: text only
    slide1 = prs.slides.add_slide(prs.slide_layouts[1])
    slide1.shapes.title.text = "Introduction"
    slide1.placeholders[1].text = "This is the first slide."
    # Slide 2: chart
    slide2 = prs.slides.add_slide(prs.slide_layouts[5])  # Blank layout
    chart_data = CategoryChartData()
    chart_data.categories = ["Q1", "Q2", "Q3"]
    chart_data.add_series("Revenue", (100, 200, 300))
    slide2.shapes.add_chart(
        XL_CHART_TYPE.COLUMN_CLUSTERED,
        Inches(1),
        Inches(1),
        Inches(6),
        Inches(4),
        chart_data,
    )
    buf = io.BytesIO()
    prs.save(buf)
    # Rewind so callers can read the document from the start.
    buf.seek(0)
    return buf
def _make_pptx_without_chart() -> io.BytesIO:
    """Create an in-memory pptx with a single text-only slide.

    Returns:
        A BytesIO positioned at 0, ready to be passed to pptx_to_text.
    """
    prs = Presentation()
    slide = prs.slides.add_slide(prs.slide_layouts[1])
    slide.shapes.title.text = "Hello World"
    slide.placeholders[1].text = "Some content here."
    buf = io.BytesIO()
    prs.save(buf)
    # Rewind so callers can read the document from the start.
    buf.seek(0)
    return buf
class TestPptxToText:
    """pptx_to_text behavior around the patched (no-op) chart converter."""

    def test_chart_is_omitted(self) -> None:
        # Precondition
        pptx_file = _make_pptx_with_chart()
        # Under test
        result = pptx_to_text(pptx_file)
        # Postcondition: text slides survive, chart is replaced by placeholder.
        assert "Introduction" in result
        assert "first slide" in result
        assert "[chart omitted]" in result
        # The actual chart data should NOT appear in the output.
        assert "Revenue" not in result
        assert "Q1" not in result

    def test_text_only_pptx(self) -> None:
        # Precondition
        pptx_file = _make_pptx_without_chart()
        # Under test
        result = pptx_to_text(pptx_file)
        # Postcondition: no chart placeholder when no chart exists.
        assert "Hello World" in result
        assert "Some content" in result
        assert "[chart omitted]" not in result

View File

@@ -70,6 +70,10 @@ backend = [
"lazy_imports==1.0.1",
"lxml==5.3.0",
"Mako==1.2.4",
# NOTE: Do not update without understanding the patching behavior in
# get_markitdown_converter in
# backend/onyx/file_processing/extract_file_text.py and what impacts
# updating might have on this behavior.
"markitdown[pdf, docx, pptx, xlsx, xls]==0.1.2",
"mcp[cli]==1.26.0",
"msal==1.34.0",

View File

@@ -32,8 +32,10 @@ import {
OpenRouterFetchParams,
LiteLLMProxyFetchParams,
BifrostFetchParams,
OpenAICompatibleFetchParams,
OpenAICompatibleModelResponse,
} from "@/interfaces/llm";
import { SvgAws, SvgBifrost, SvgOpenrouter } from "@opal/icons";
import { SvgAws, SvgBifrost, SvgOpenrouter, SvgPlug } from "@opal/icons";
// Aggregator providers that host models from multiple vendors
export const AGGREGATOR_PROVIDERS = new Set([
@@ -44,6 +46,7 @@ export const AGGREGATOR_PROVIDERS = new Set([
"lm_studio",
"litellm_proxy",
"bifrost",
"openai_compatible",
"vertex_ai",
]);
@@ -82,6 +85,7 @@ export const getProviderIcon = (
openrouter: SvgOpenrouter,
litellm_proxy: LiteLLMIcon,
bifrost: SvgBifrost,
openai_compatible: SvgPlug,
vertex_ai: GeminiIcon,
};
@@ -411,6 +415,64 @@ export const fetchBifrostModels = async (
}
};
/**
 * Fetches the model list from a generic OpenAI-compatible server via the
 * backend proxy endpoint. Request/response fields use snake_case to match
 * the API structure. Returns an empty model list plus an error message on
 * any failure (missing base URL, HTTP error, or network exception).
 */
export const fetchOpenAICompatibleModels = async (
  params: OpenAICompatibleFetchParams
): Promise<{ models: ModelConfiguration[]; error?: string }> => {
  const { api_base: apiBase, api_key: apiKey, provider_name: providerName } =
    params;
  if (!apiBase) {
    return { models: [], error: "API Base is required" };
  }

  try {
    const response = await fetch(
      "/api/admin/llm/openai-compatible/available-models",
      {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          api_base: apiBase,
          api_key: apiKey,
          provider_name: providerName,
        }),
        signal: params.signal,
      }
    );

    if (!response.ok) {
      // Prefer the backend's error detail when the body is parseable JSON.
      let errorMessage = "Failed to fetch models";
      try {
        const errorData = await response.json();
        errorMessage = errorData.detail || errorData.message || errorMessage;
      } catch {
        // ignore JSON parsing errors
      }
      return { models: [], error: errorMessage };
    }

    const data: OpenAICompatibleModelResponse[] = await response.json();
    const models: ModelConfiguration[] = [];
    for (const entry of data) {
      models.push({
        name: entry.name,
        display_name: entry.display_name,
        is_visible: true,
        max_input_tokens: entry.max_input_tokens,
        supports_image_input: entry.supports_image_input,
        supports_reasoning: entry.supports_reasoning,
      });
    }
    return { models };
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : "Unknown error";
    return { models: [], error: errorMessage };
  }
};
/**
* Fetches LiteLLM Proxy models directly without any form state dependencies.
* Uses snake_case params to match API structure.
@@ -531,6 +593,13 @@ export const fetchModels = async (
provider_name: formValues.name,
signal,
});
case LLMProviderName.OPENAI_COMPATIBLE:
return fetchOpenAICompatibleModels({
api_base: formValues.api_base,
api_key: formValues.api_key,
provider_name: formValues.name,
signal,
});
default:
return { models: [], error: `Unknown provider: ${providerName}` };
}
@@ -545,6 +614,7 @@ export function canProviderFetchModels(providerName?: string) {
case LLMProviderName.OPENROUTER:
case LLMProviderName.LITELLM_PROXY:
case LLMProviderName.BIFROST:
case LLMProviderName.OPENAI_COMPATIBLE:
return true;
default:
return false;

View File

@@ -133,7 +133,7 @@ async function createFederatedConnector(
async function updateFederatedConnector(
id: number,
credentials: CredentialForm,
credentials: CredentialForm | null,
config?: ConfigForm
): Promise<{ success: boolean; message: string }> {
try {
@@ -143,7 +143,7 @@ async function updateFederatedConnector(
"Content-Type": "application/json",
},
body: JSON.stringify({
credentials,
credentials: credentials ?? undefined,
config: config || {},
}),
});
@@ -201,7 +201,9 @@ export function FederatedConnectorForm({
const isEditMode = connectorId !== undefined;
const [formState, setFormState] = useState<FormState>({
credentials: preloadedConnectorData?.credentials || {},
// In edit mode, don't populate credentials with masked values from the API.
// Masked values (e.g. "••••••••••••") would be saved back and corrupt the real credentials.
credentials: isEditMode ? {} : preloadedConnectorData?.credentials || {},
config: preloadedConnectorData?.config || {},
schema: preloadedCredentialSchema?.credentials || null,
configurationSchema: null,
@@ -209,6 +211,7 @@ export function FederatedConnectorForm({
configurationSchemaError: null,
connectorError: null,
});
const [credentialsModified, setCredentialsModified] = useState(false);
const [isSubmitting, setIsSubmitting] = useState(false);
const [submitMessage, setSubmitMessage] = useState<string | null>(null);
const [submitSuccess, setSubmitSuccess] = useState<boolean | null>(null);
@@ -333,6 +336,7 @@ export function FederatedConnectorForm({
}
const handleCredentialChange = (key: string, value: string) => {
setCredentialsModified(true);
setFormState((prev) => ({
...prev,
credentials: {
@@ -354,6 +358,11 @@ export function FederatedConnectorForm({
const handleValidateCredentials = async () => {
if (!formState.schema) return;
if (isEditMode && !credentialsModified) {
setSubmitMessage("Enter new credential values before validating.");
setSubmitSuccess(false);
return;
}
setIsValidating(true);
setSubmitMessage(null);
@@ -411,8 +420,10 @@ export function FederatedConnectorForm({
setSubmitSuccess(null);
try {
// Validate required fields
if (formState.schema) {
const shouldValidateCredentials = !isEditMode || credentialsModified;
// Validate required fields (skip for credentials in edit mode when unchanged)
if (formState.schema && shouldValidateCredentials) {
const missingRequired = Object.entries(formState.schema)
.filter(
([key, field]) => field.required && !formState.credentials[key]
@@ -442,16 +453,20 @@ export function FederatedConnectorForm({
}
setConfigValidationErrors({});
// Validate credentials before creating/updating
const validation = await validateCredentials(
connector,
formState.credentials
);
if (!validation.success) {
setSubmitMessage(`Credential validation failed: ${validation.message}`);
setSubmitSuccess(false);
setIsSubmitting(false);
return;
// Validate credentials before creating/updating (skip in edit mode when unchanged)
if (shouldValidateCredentials) {
const validation = await validateCredentials(
connector,
formState.credentials
);
if (!validation.success) {
setSubmitMessage(
`Credential validation failed: ${validation.message}`
);
setSubmitSuccess(false);
setIsSubmitting(false);
return;
}
}
// Create or update the connector
@@ -459,7 +474,7 @@ export function FederatedConnectorForm({
isEditMode && connectorId
? await updateFederatedConnector(
connectorId,
formState.credentials,
credentialsModified ? formState.credentials : null,
formState.config
)
: await createFederatedConnector(
@@ -538,14 +553,16 @@ export function FederatedConnectorForm({
id={fieldKey}
type={fieldSpec.secret ? "password" : "text"}
placeholder={
fieldSpec.example
? String(fieldSpec.example)
: fieldSpec.description
isEditMode && !credentialsModified
? "•••••••• (leave blank to keep current value)"
: fieldSpec.example
? String(fieldSpec.example)
: fieldSpec.description
}
value={formState.credentials[fieldKey] || ""}
onChange={(e) => handleCredentialChange(fieldKey, e.target.value)}
className="w-96"
required={fieldSpec.required}
required={!isEditMode && fieldSpec.required}
/>
</div>
))}

View File

@@ -14,6 +14,7 @@ export enum LLMProviderName {
BEDROCK = "bedrock",
LITELLM_PROXY = "litellm_proxy",
BIFROST = "bifrost",
OPENAI_COMPATIBLE = "openai_compatible",
CUSTOM = "custom",
}
@@ -181,6 +182,21 @@ export interface BifrostModelResponse {
supports_reasoning: boolean;
}
// Request parameters for probing an OpenAI-compatible server for its models.
export interface OpenAICompatibleFetchParams {
  api_base?: string;
  api_key?: string; // omitted when the server requires no authentication
  provider_name?: string; // existing provider name, used to look up stored credentials
  signal?: AbortSignal; // lets the caller cancel an in-flight fetch
}
// One model entry returned by the OpenAI-compatible model-listing endpoint.
export interface OpenAICompatibleModelResponse {
  name: string;
  display_name: string;
  max_input_tokens: number | null; // presumably null when the server does not report a limit — confirm against backend
  supports_image_input: boolean;
  supports_reasoning: boolean;
}
export interface VertexAIFetchParams {
model_configurations?: ModelConfiguration[];
}
@@ -199,5 +215,6 @@ export type FetchModelsParams =
| OpenRouterFetchParams
| LiteLLMProxyFetchParams
| BifrostFetchParams
| OpenAICompatibleFetchParams
| VertexAIFetchParams
| LMStudioFetchParams;

View File

@@ -8,6 +8,7 @@ import {
SvgCloud,
SvgAws,
SvgOpenrouter,
SvgPlug,
SvgServer,
SvgAzure,
SvgGemini,
@@ -28,6 +29,7 @@ const PROVIDER_ICONS: Record<string, IconFunctionComponent> = {
[LLMProviderName.OPENROUTER]: SvgOpenrouter,
[LLMProviderName.LM_STUDIO]: SvgLmStudio,
[LLMProviderName.BIFROST]: SvgBifrost,
[LLMProviderName.OPENAI_COMPATIBLE]: SvgPlug,
// fallback
[LLMProviderName.CUSTOM]: SvgServer,
@@ -45,6 +47,7 @@ const PROVIDER_PRODUCT_NAMES: Record<string, string> = {
[LLMProviderName.OPENROUTER]: "OpenRouter",
[LLMProviderName.LM_STUDIO]: "LM Studio",
[LLMProviderName.BIFROST]: "Bifrost",
[LLMProviderName.OPENAI_COMPATIBLE]: "OpenAI Compatible",
// fallback
[LLMProviderName.CUSTOM]: "Custom Models",
@@ -62,6 +65,7 @@ const PROVIDER_DISPLAY_NAMES: Record<string, string> = {
[LLMProviderName.OPENROUTER]: "OpenRouter",
[LLMProviderName.LM_STUDIO]: "LM Studio",
[LLMProviderName.BIFROST]: "Bifrost",
[LLMProviderName.OPENAI_COMPATIBLE]: "OpenAI Compatible",
// fallback
[LLMProviderName.CUSTOM]: "Other providers or self-hosted",

View File

@@ -1,8 +1,7 @@
"use client";
import { useMemo, useState } from "react";
import { useState } from "react";
import { useRouter } from "next/navigation";
import useSWR from "swr";
import { Table, Button } from "@opal/components";
import { IllustrationContent } from "@opal/layouts";
import { SvgUsers } from "@opal/icons";
@@ -14,16 +13,14 @@ import Text from "@/refresh-components/texts/Text";
import SimpleLoader from "@/refresh-components/loaders/SimpleLoader";
import Separator from "@/refresh-components/Separator";
import { toast } from "@/hooks/useToast";
import { errorHandlingFetcher } from "@/lib/fetcher";
import useAdminUsers from "@/hooks/useAdminUsers";
import type { ApiKeyDescriptor, MemberRow } from "./interfaces";
import useGroupMemberCandidates from "./useGroupMemberCandidates";
import {
createGroup,
updateAgentGroupSharing,
updateDocSetGroupSharing,
saveTokenLimits,
} from "./svc";
import { apiKeyToMemberRow, memberTableColumns, PAGE_SIZE } from "./shared";
import { memberTableColumns, PAGE_SIZE } from "./shared";
import SharedGroupResources from "@/refresh-pages/admin/GroupsPage/SharedGroupResources";
import TokenLimitSection from "./TokenLimitSection";
import type { TokenLimit } from "./TokenLimitSection";
@@ -41,22 +38,7 @@ function CreateGroupPage() {
{ tokenBudget: null, periodHours: null },
]);
const { users, isLoading: usersLoading, error: usersError } = useAdminUsers();
const {
data: apiKeys,
isLoading: apiKeysLoading,
error: apiKeysError,
} = useSWR<ApiKeyDescriptor[]>("/api/admin/api-key", errorHandlingFetcher);
const isLoading = usersLoading || apiKeysLoading;
const error = usersError ?? apiKeysError;
const allRows: MemberRow[] = useMemo(() => {
const activeUsers = users.filter((u) => u.is_active);
const serviceAccountRows = (apiKeys ?? []).map(apiKeyToMemberRow);
return [...activeUsers, ...serviceAccountRows];
}, [users, apiKeys]);
const { rows: allRows, isLoading, error } = useGroupMemberCandidates();
async function handleCreate() {
const trimmed = groupName.trim();
@@ -133,11 +115,11 @@ function CreateGroupPage() {
{/* Members table */}
{isLoading && <SimpleLoader />}
{error && (
{error ? (
<Text as="p" secondaryBody text03>
Failed to load users.
</Text>
)}
) : null}
{!isLoading && !error && (
<Section

View File

@@ -3,6 +3,7 @@
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { useRouter } from "next/navigation";
import useSWR, { useSWRConfig } from "swr";
import useGroupMemberCandidates from "./useGroupMemberCandidates";
import { Table, Button } from "@opal/components";
import { IllustrationContent } from "@opal/layouts";
import { SvgUsers, SvgTrash, SvgMinusCircle, SvgPlusCircle } from "@opal/icons";
@@ -19,20 +20,9 @@ import ConfirmationModalLayout from "@/refresh-components/layouts/ConfirmationMo
import Separator from "@/refresh-components/Separator";
import { toast } from "@/hooks/useToast";
import { errorHandlingFetcher } from "@/lib/fetcher";
import useAdminUsers from "@/hooks/useAdminUsers";
import type { UserGroup } from "@/lib/types";
import type {
ApiKeyDescriptor,
MemberRow,
TokenRateLimitDisplay,
} from "./interfaces";
import {
apiKeyToMemberRow,
baseColumns,
memberTableColumns,
tc,
PAGE_SIZE,
} from "./shared";
import type { MemberRow, TokenRateLimitDisplay } from "./interfaces";
import { baseColumns, memberTableColumns, tc, PAGE_SIZE } from "./shared";
import {
USER_GROUP_URL,
renameGroup,
@@ -104,18 +94,15 @@ function EditGroupPage({ groupId }: EditGroupPageProps) {
const initialAgentIdsRef = useRef<number[]>([]);
const initialDocSetIdsRef = useRef<number[]>([]);
// Users and API keys
const { users, isLoading: usersLoading, error: usersError } = useAdminUsers();
// Users + service accounts (curator-accessible — see hook docs).
const {
data: apiKeys,
isLoading: apiKeysLoading,
error: apiKeysError,
} = useSWR<ApiKeyDescriptor[]>("/api/admin/api-key", errorHandlingFetcher);
rows: allRows,
isLoading: candidatesLoading,
error: candidatesError,
} = useGroupMemberCandidates();
const isLoading =
groupLoading || usersLoading || apiKeysLoading || tokenLimitsLoading;
const error = groupError ?? usersError ?? apiKeysError;
const isLoading = groupLoading || candidatesLoading || tokenLimitsLoading;
const error = groupError ?? candidatesError;
// Pre-populate form when group data loads
useEffect(() => {
@@ -145,12 +132,6 @@ function EditGroupPage({ groupId }: EditGroupPageProps) {
}
}, [tokenRateLimits]);
const allRows = useMemo(() => {
const activeUsers = users.filter((u) => u.is_active);
const serviceAccountRows = (apiKeys ?? []).map(apiKeyToMemberRow);
return [...activeUsers, ...serviceAccountRows];
}, [users, apiKeys]);
const memberRows = useMemo(() => {
const selected = new Set(selectedUserIds);
return allRows.filter((r) => selected.has(r.id ?? r.email));

View File

@@ -0,0 +1,161 @@
"use client";
import { useMemo } from "react";
import useSWR from "swr";
import { errorHandlingFetcher } from "@/lib/fetcher";
import { useUser } from "@/providers/UserProvider";
// Curator-accessible listing of all users (and service-account entries via
// `?include_api_keys=true`). The admin-only `/manage/users/accepted/all` and
// `/manage/users/invited` endpoints return 403 for global curators, which used
// to break the Edit Group page entirely — see the JSDoc on the hook below.
const GROUP_MEMBER_CANDIDATES_URL = "/api/manage/users?include_api_keys=true";
const ADMIN_API_KEYS_URL = "/api/admin/api-key";
import { UserStatus, type UserRole } from "@/lib/types";
// Mirrors `DANSWER_API_KEY_DUMMY_EMAIL_DOMAIN` on the backend; service-account
// users are identified by this email suffix because release/v3.1 does not yet
// expose `account_type` on the FullUserSnapshot returned from `/manage/users`.
const API_KEY_EMAIL_SUFFIX = "@onyxapikey.ai";

/** True when `email` is the synthetic address of a service-account user. */
function isApiKeyEmail(email: string): boolean {
  const suffix = API_KEY_EMAIL_SUFFIX;
  return email.slice(-suffix.length) === suffix;
}
import type {
UserGroupInfo,
UserRow,
} from "@/refresh-pages/admin/UsersPage/interfaces";
import type { ApiKeyDescriptor, MemberRow } from "./interfaces";
// Backend response shape for `/api/manage/users?include_api_keys=true`. The
// existing `AllUsersResponse` in `lib/types.ts` types `accepted` as `User[]`,
// which is missing fields the table needs (`personal_name`, `account_type`,
// `groups`, etc.), so we declare an accurate local type here.
interface FullUserSnapshot {
  id: string;
  email: string; // service accounts are detected by suffix — see isApiKeyEmail
  role: UserRole;
  is_active: boolean; // inactive users are excluded from the candidate rows
  password_configured: boolean;
  personal_name: string | null;
  created_at: string;
  updated_at: string;
  groups: UserGroupInfo[];
  is_scim_synced: boolean;
}
// Paginated envelope returned by `/api/manage/users`; only `accepted` is
// consumed by this hook — the other fields are declared for completeness.
interface ManageUsersResponse {
  accepted: FullUserSnapshot[];
  invited: { email: string }[];
  slack_users: FullUserSnapshot[];
  accepted_pages: number;
  invited_pages: number;
  slack_users_pages: number;
}
/** Map a real (non-service-account) user snapshot onto the table's MemberRow shape. */
function snapshotToMemberRow(snapshot: FullUserSnapshot): MemberRow {
  const {
    id,
    email,
    role,
    is_active,
    is_scim_synced,
    personal_name,
    created_at,
    updated_at,
    groups,
  } = snapshot;
  return {
    id,
    email,
    role,
    status: is_active ? UserStatus.ACTIVE : UserStatus.INACTIVE,
    is_active,
    is_scim_synced,
    personal_name,
    created_at,
    updated_at,
    groups,
  };
}
/**
 * Map a service-account snapshot onto a MemberRow. The optional
 * ApiKeyDescriptor (available only when the admin-only api-key fetch
 * succeeded) enriches the row with the key's role, name, and masked display.
 */
function serviceAccountToMemberRow(
  snapshot: FullUserSnapshot,
  apiKey: ApiKeyDescriptor | undefined
): MemberRow {
  const effectiveRole = apiKey?.api_key_role ?? snapshot.role;
  const displayName =
    apiKey?.api_key_name ?? snapshot.personal_name ?? "Unnamed Key";
  return {
    id: snapshot.id,
    // Rendered verbatim in the email column for service accounts.
    email: "Service Account",
    role: effectiveRole,
    status: UserStatus.ACTIVE,
    is_active: true,
    is_scim_synced: false,
    personal_name: displayName,
    created_at: null,
    updated_at: null,
    groups: [],
    api_key_display: apiKey?.api_key_display,
  };
}
interface UseGroupMemberCandidatesResult {
  /** Active users + service-account rows, in the order the table expects. */
  rows: MemberRow[];
  /** Subset of `rows` representing real (non-service-account) users. */
  userRows: MemberRow[];
  /** True while the users fetch (or, for admins, the api-key fetch) is pending. */
  isLoading: boolean;
  /** Error from the users fetch only; an api-key fetch failure is non-fatal. */
  error: unknown;
}
/**
 * Returns the candidate list for the group create/edit member pickers.
 *
 * Hits `/api/manage/users?include_api_keys=true`, which is gated by
 * `current_curator_or_admin_user` on the backend, so this works for both
 * admins and global curators (the admin-only `/accepted/all` and `/invited`
 * endpoints used to be called here, which 403'd for global curators and broke
 * the Edit Group page entirely).
 *
 * For admins, we additionally fetch `/admin/api-key` to enrich service-account
 * rows with the masked api-key display string. That call is admin-only and is
 * skipped for curators; its failure is non-fatal.
 */
export default function useGroupMemberCandidates(): UseGroupMemberCandidatesResult {
  const { isAdmin } = useUser();

  // Primary fetch: curator-accessible user listing.
  const usersFetch = useSWR<ManageUsersResponse>(
    GROUP_MEMBER_CANDIDATES_URL,
    errorHandlingFetcher
  );

  // Secondary, admin-only fetch (key is null for curators, so SWR skips it).
  const apiKeysFetch = useSWR<ApiKeyDescriptor[]>(
    isAdmin ? ADMIN_API_KEYS_URL : null,
    errorHandlingFetcher
  );

  // Index api-key descriptors by their owning user id for O(1) lookup.
  const apiKeysByUserId = useMemo(() => {
    const byId = new Map<string, ApiKeyDescriptor>();
    (apiKeysFetch.data ?? []).forEach((key) => byId.set(key.user_id, key));
    return byId;
  }, [apiKeysFetch.data]);

  // Split active accepted users into real users vs. service accounts, then
  // present them as one list with service accounts at the end.
  const { rows, userRows } = useMemo(() => {
    const realUsers: MemberRow[] = [];
    const serviceAccounts: MemberRow[] = [];
    (usersFetch.data?.accepted ?? [])
      .filter((snapshot) => snapshot.is_active)
      .forEach((snapshot) => {
        if (isApiKeyEmail(snapshot.email)) {
          serviceAccounts.push(
            serviceAccountToMemberRow(snapshot, apiKeysByUserId.get(snapshot.id))
          );
        } else {
          realUsers.push(snapshotToMemberRow(snapshot));
        }
      });
    return {
      rows: [...realUsers, ...serviceAccounts],
      userRows: realUsers,
    };
  }, [usersFetch.data, apiKeysByUserId]);

  return {
    rows,
    userRows,
    isLoading: usersFetch.isLoading || (isAdmin && apiKeysFetch.isLoading),
    error: usersFetch.error,
  };
}

View File

@@ -46,6 +46,7 @@ import CustomModal from "@/sections/modals/llmConfig/CustomModal";
import LMStudioForm from "@/sections/modals/llmConfig/LMStudioForm";
import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
import BifrostModal from "@/sections/modals/llmConfig/BifrostModal";
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
import { Section } from "@/layouts/general-layouts";
const route = ADMIN_ROUTES.LLM_MODELS;
@@ -67,6 +68,7 @@ const PROVIDER_DISPLAY_ORDER: string[] = [
"openrouter",
"lm_studio",
"bifrost",
"openai_compatible",
];
const PROVIDER_MODAL_MAP: Record<
@@ -147,6 +149,13 @@ const PROVIDER_MODAL_MAP: Record<
onOpenChange={onOpenChange}
/>
),
openai_compatible: (d, open, onOpenChange) => (
<OpenAICompatibleModal
shouldMarkAsDefault={d}
open={open}
onOpenChange={onOpenChange}
/>
),
};
// ============================================================================

View File

@@ -0,0 +1,267 @@
"use client";
import { useState, useEffect } from "react";
import { markdown } from "@opal/utils";
import { useSWRConfig } from "swr";
import { Formik, FormikProps } from "formik";
import InputTypeInField from "@/refresh-components/form/InputTypeInField";
import PasswordInputTypeInField from "@/refresh-components/form/PasswordInputTypeInField";
import * as InputLayouts from "@/layouts/input-layouts";
import {
LLMProviderFormProps,
LLMProviderName,
LLMProviderView,
ModelConfiguration,
} from "@/interfaces/llm";
import { fetchOpenAICompatibleModels } from "@/app/admin/configuration/llm/utils";
import * as Yup from "yup";
import { useWellKnownLLMProvider } from "@/hooks/useLLMProviders";
import {
buildDefaultInitialValues,
buildDefaultValidationSchema,
buildAvailableModelConfigurations,
buildOnboardingInitialValues,
BaseLLMFormValues,
} from "@/sections/modals/llmConfig/utils";
import {
submitLLMProvider,
submitOnboardingProvider,
} from "@/sections/modals/llmConfig/svc";
import {
ModelsField,
DisplayNameField,
ModelsAccessField,
FieldSeparator,
FieldWrapper,
LLMConfigurationModalWrapper,
} from "@/sections/modals/llmConfig/shared";
import { toast } from "@/hooks/useToast";
// Provider enum value this modal configures.
const OPENAI_COMPATIBLE_PROVIDER = LLMProviderName.OPENAI_COMPATIBLE;
// No sensible default base URL exists for an arbitrary compatible server.
const DEFAULT_API_BASE = "";

// Formik values: shared base fields plus provider-specific connection info.
interface OpenAICompatibleModalValues extends BaseLLMFormValues {
  api_key: string; // may be empty — authentication is optional for these servers
  api_base: string;
}
interface OpenAICompatibleModalInternalsProps {
  formikProps: FormikProps<OpenAICompatibleModalValues>;
  // undefined when creating a brand-new provider.
  existingLlmProvider: LLMProviderView | undefined;
  // Models fetched live from the server; empty until a fetch succeeds.
  fetchedModels: ModelConfiguration[];
  setFetchedModels: (models: ModelConfiguration[]) => void;
  // Fallback model list built from the provider descriptor / saved config.
  modelConfigurations: ModelConfiguration[];
  isTesting: boolean;
  onClose: () => void;
  isOnboarding: boolean; // onboarding variant hides display-name and access fields
}
/**
 * Inner form body rendered inside the Formik context: API base / API key
 * inputs plus the model list, wrapped in LLMConfigurationModalWrapper.
 */
function OpenAICompatibleModalInternals({
  formikProps,
  existingLlmProvider,
  fetchedModels,
  setFetchedModels,
  modelConfigurations,
  isTesting,
  onClose,
  isOnboarding,
}: OpenAICompatibleModalInternalsProps) {
  // Prefer freshly fetched models; otherwise fall back to the provider's
  // saved configurations, then to the descriptor defaults.
  const currentModels =
    fetchedModels.length > 0
      ? fetchedModels
      : existingLlmProvider?.model_configurations || modelConfigurations;
  // Cannot query the server for models without a base URL.
  const isFetchDisabled = !formikProps.values.api_base;
  // Fetches the server's model list; throws on error so each caller (the
  // refetch action below, the initial-load effect) decides how to surface it.
  const handleFetchModels = async () => {
    const { models, error } = await fetchOpenAICompatibleModels({
      api_base: formikProps.values.api_base,
      api_key: formikProps.values.api_key || undefined,
      provider_name: existingLlmProvider?.name,
    });
    if (error) {
      throw new Error(error);
    }
    setFetchedModels(models);
  };
  // Auto-fetch models on initial load when editing an existing provider
  useEffect(() => {
    if (existingLlmProvider && !isFetchDisabled) {
      handleFetchModels().catch((err) => {
        toast.error(
          err instanceof Error ? err.message : "Failed to fetch models"
        );
      });
    }
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);
  return (
    <LLMConfigurationModalWrapper
      providerEndpoint={LLMProviderName.OPENAI_COMPATIBLE}
      existingProviderName={existingLlmProvider?.name}
      onClose={onClose}
      isFormValid={formikProps.isValid}
      isDirty={formikProps.dirty}
      isTesting={isTesting}
      isSubmitting={formikProps.isSubmitting}
    >
      <FieldWrapper>
        <InputLayouts.Vertical
          name="api_base"
          title="API Base URL"
          subDescription="The base URL of your OpenAI-compatible server."
        >
          <InputTypeInField
            name="api_base"
            placeholder="http://localhost:8000/v1"
          />
        </InputLayouts.Vertical>
      </FieldWrapper>
      <FieldWrapper>
        <InputLayouts.Vertical
          name="api_key"
          title="API Key"
          optional
          subDescription={markdown(
            "Provide an API key if your server requires authentication."
          )}
        >
          <PasswordInputTypeInField name="api_key" placeholder="API Key" />
        </InputLayouts.Vertical>
      </FieldWrapper>
      {!isOnboarding && (
        <>
          <FieldSeparator />
          <DisplayNameField disabled={!!existingLlmProvider} />
        </>
      )}
      <FieldSeparator />
      <ModelsField
        modelConfigurations={currentModels}
        formikProps={formikProps}
        recommendedDefaultModel={null}
        shouldShowAutoUpdateToggle={false}
        onRefetch={isFetchDisabled ? undefined : handleFetchModels}
      />
      {!isOnboarding && (
        <>
          <FieldSeparator />
          <ModelsAccessField formikProps={formikProps} />
        </>
      )}
    </LLMConfigurationModalWrapper>
  );
}
/**
 * Create/edit modal for a generic OpenAI-compatible LLM provider. Supports
 * the standard admin "llm-configuration" variant and the onboarding variant,
 * which submits through the onboarding flow instead of the provider API.
 */
export default function OpenAICompatibleModal({
  variant = "llm-configuration",
  existingLlmProvider,
  shouldMarkAsDefault,
  open,
  onOpenChange,
  defaultModelName,
  onboardingState,
  onboardingActions,
  llmDescriptor,
}: LLMProviderFormProps) {
  const [fetchedModels, setFetchedModels] = useState<ModelConfiguration[]>([]);
  const [isTesting, setIsTesting] = useState(false);
  const isOnboarding = variant === "onboarding";
  const { mutate } = useSWRConfig();
  const { wellKnownLLMProvider } = useWellKnownLLMProvider(
    OPENAI_COMPATIBLE_PROVIDER
  );
  // All hooks above run unconditionally; only rendering is skipped when closed.
  if (open === false) return null;
  const onClose = () => onOpenChange?.(false);
  const modelConfigurations = buildAvailableModelConfigurations(
    existingLlmProvider,
    wellKnownLLMProvider ?? llmDescriptor
  );
  const initialValues: OpenAICompatibleModalValues = isOnboarding
    ? ({
        ...buildOnboardingInitialValues(),
        name: OPENAI_COMPATIBLE_PROVIDER,
        provider: OPENAI_COMPATIBLE_PROVIDER,
        api_key: "",
        api_base: DEFAULT_API_BASE,
        default_model_name: "",
      } as OpenAICompatibleModalValues)
    : {
        ...buildDefaultInitialValues(
          existingLlmProvider,
          modelConfigurations,
          defaultModelName
        ),
        api_key: existingLlmProvider?.api_key ?? "",
        api_base: existingLlmProvider?.api_base ?? DEFAULT_API_BASE,
      };
  // api_base is the only provider-specific required field.
  const validationSchema = buildDefaultValidationSchema().shape({
    api_base: Yup.string().required("API Base URL is required"),
  });
  return (
    <Formik
      initialValues={initialValues}
      validationSchema={validationSchema}
      validateOnMount={true}
      onSubmit={async (values, { setSubmitting }) => {
        // Onboarding submits through the onboarding flow; otherwise use the
        // regular provider create/update path (which also tests the config).
        if (isOnboarding && onboardingState && onboardingActions) {
          const modelConfigsToUse =
            fetchedModels.length > 0 ? fetchedModels : [];
          await submitOnboardingProvider({
            providerName: OPENAI_COMPATIBLE_PROVIDER,
            payload: {
              ...values,
              model_configurations: modelConfigsToUse,
            },
            onboardingState,
            onboardingActions,
            isCustomProvider: false,
            onClose,
            setIsSubmitting: setSubmitting,
          });
        } else {
          await submitLLMProvider({
            providerName: OPENAI_COMPATIBLE_PROVIDER,
            values,
            initialValues,
            modelConfigurations:
              fetchedModels.length > 0 ? fetchedModels : modelConfigurations,
            existingLlmProvider,
            shouldMarkAsDefault,
            setIsTesting,
            mutate,
            onClose,
            setSubmitting,
          });
        }
      }}
    >
      {(formikProps) => (
        <OpenAICompatibleModalInternals
          formikProps={formikProps}
          existingLlmProvider={existingLlmProvider}
          fetchedModels={fetchedModels}
          setFetchedModels={setFetchedModels}
          modelConfigurations={modelConfigurations}
          isTesting={isTesting}
          onClose={onClose}
          isOnboarding={isOnboarding}
        />
      )}
    </Formik>
  );
}

View File

@@ -10,6 +10,7 @@ import BedrockModal from "@/sections/modals/llmConfig/BedrockModal";
import LMStudioForm from "@/sections/modals/llmConfig/LMStudioForm";
import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
import BifrostModal from "@/sections/modals/llmConfig/BifrostModal";
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
function detectIfRealOpenAIProvider(provider: LLMProviderView) {
return (
@@ -59,6 +60,8 @@ export function getModalForExistingProvider(
return <LiteLLMProxyModal {...props} />;
case LLMProviderName.BIFROST:
return <BifrostModal {...props} />;
case LLMProviderName.OPENAI_COMPATIBLE:
return <OpenAICompatibleModal {...props} />;
default:
return <CustomModal {...props} />;
}

View File

@@ -14,6 +14,7 @@ import OpenRouterModal from "@/sections/modals/llmConfig/OpenRouterModal";
import CustomModal from "@/sections/modals/llmConfig/CustomModal";
import LMStudioForm from "@/sections/modals/llmConfig/LMStudioForm";
import LiteLLMProxyModal from "@/sections/modals/llmConfig/LiteLLMProxyModal";
import OpenAICompatibleModal from "@/sections/modals/llmConfig/OpenAICompatibleModal";
// Display info for LLM provider cards - title is the product name, displayName is the company/platform
const PROVIDER_DISPLAY_INFO: Record<
@@ -47,6 +48,10 @@ const PROVIDER_DISPLAY_INFO: Record<
title: "LiteLLM Proxy",
displayName: "LiteLLM Proxy",
},
[LLMProviderName.OPENAI_COMPATIBLE]: {
title: "OpenAI Compatible",
displayName: "OpenAI Compatible",
},
};
export function getProviderDisplayInfo(providerName: string): {
@@ -124,6 +129,9 @@ export function getOnboardingForm({
case LLMProviderName.LITELLM_PROXY:
return <LiteLLMProxyModal {...providerProps} />;
case LLMProviderName.OPENAI_COMPATIBLE:
return <OpenAICompatibleModal {...providerProps} />;
default:
return <CustomModal {...sharedProps} />;
}